kern_mutex.c revision 118272
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 118272 2003-07-31 18:52:18Z jhb $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right: we ought to bump
			 * the priority of the thread that next acquires
			 * the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(!TD_IS_SLEEPING(td), (
		    "sleeping thread (pid %d) owns a mutex",
		    td->td_proc->p_pid));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, check whether td is curthread (this should never
		 * happen, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
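
/*
 * A worked example of the propagation above (illustrative numbers only,
 * with lower numeric values meaning higher priority): suppose thread A
 * (priority 80) blocks on a mutex owned by thread B (priority 120),
 * while B is itself blocked on a second mutex owned by thread C
 * (priority 140).  The loop first lends A's priority to B, then follows
 * B's td_blocked pointer and lends it to C as well, so every thread in
 * the chain runs at priority 80 until the locks are released.
 */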

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define	NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define	MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
	    "max", "total", "count", "avg", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
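
/*
 * Example use of the profiling knobs above (a sketch, assuming a kernel
 * built with `options MUTEX_PROFILING'); everything is driven from
 * userland through sysctl(8):
 *
 *	sysctl debug.mutex.prof.enable=1	# start recording hold times
 *	(exercise the system)
 *	sysctl debug.mutex.prof.stats		# dump the per-site table
 *	sysctl debug.mutex.prof.enable=0	# stop recording
 */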

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' We do NOT handle recursion here.  If this
 * function is called on a recursed mutex, it will return failure and
 * will not recursively acquire the lock.  You are expected to know what
 * you are doing.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}
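
/*
 * Example caller (a sketch; `foo_mtx' and `foo_stats' are hypothetical):
 * because recursion is not handled, a trylock consumer simply falls back
 * when the lock is unavailable:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_stats.hits++;
 *		mtx_unlock(&foo_mtx);
 *	} else
 *		foo_stats.misses++;
 */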

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct thread *td1;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif

	if (mtx_owned(m)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {

		mtx_lock_spin(&sched_lock);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if (v == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
			LIST_INSERT_HEAD(&td->td_contested, m, mtx_contested);

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && TD_IS_RUNNING(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex
		 * and add this mutex to the owning thread's list of
		 * contested mutexes if needed.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_lockq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		}
#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_lockname = m->mtx_object.lo_name;
		TD_SET_LOCK(td);
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active) {
#else
			else {
#endif
				printf("spin lock %s held by %p for > 5 seconds\n",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object,
				    mtx_owner(m));
#endif
				panic("spin lock held too long");
			}
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);

	LIST_REMOVE(m, mtx_contested);
	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		m->mtx_lock = MTX_CONTESTED;

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	TD_CLR_LOCK(td1);
	if (!TD_CAN_RUN(td1)) {
		mtx_unlock_spin(&sched_lock);
		return;
	}
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
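
/*
 * Typical use of the assertion above (a sketch; `foo_mtx' is
 * hypothetical): a function that requires its caller to hold a lock
 * documents and enforces that requirement with
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *
 * which compiles to nothing in kernels built without INVARIANTS.
 */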

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex \"%s\" %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
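
/*
 * Example initialization (a sketch; the names are hypothetical):
 *
 *	static struct mtx foo_mtx;
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *
 * For a mutex that must exist before its subsystem starts, the
 * MTX_SYSINIT() macro arranges for mtx_sysinit() above to make the same
 * call during boot, as the mprof_mtx declaration earlier in this file
 * does:
 *
 *	MTX_SYSINIT(foo, &foo_mtx, "foo", MTX_DEF);
 */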

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}
1003