/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 161337 2006-08-15 18:29:01Z jhb $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

70144082Sjeff#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
7129653Sdyson#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
7229653Sdyson	LK_SHARE_NONZERO | LK_WAIT_NONZERO)
7329653Sdyson
74107414Smckusickstatic int acquire(struct lock **lkpp, int extflags, int wanted);
7529653Sdysonstatic int acquiredrain(struct lock *lkp, int extflags) ;
7624269Speter
77144705Sjeffstatic __inline void
78144082Sjeffsharelock(struct thread *td, struct lock *lkp, int incr) {
7928345Sdyson	lkp->lk_flags |= LK_SHARE_NONZERO;
8028345Sdyson	lkp->lk_sharecount += incr;
81144082Sjeff	COUNT(td, incr);
8228345Sdyson}
8324269Speter
84144705Sjeffstatic __inline void
85144082Sjeffshareunlock(struct thread *td, struct lock *lkp, int decr) {
8642453Seivind
8742408Seivind	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
8824269Speter
89144082Sjeff	COUNT(td, -decr);
9034194Sdyson	if (lkp->lk_sharecount == decr) {
9128345Sdyson		lkp->lk_flags &= ~LK_SHARE_NONZERO;
9234194Sdyson		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
9334194Sdyson			wakeup(lkp);
9434194Sdyson		}
9534194Sdyson		lkp->lk_sharecount = 0;
9634194Sdyson	} else {
9734194Sdyson		lkp->lk_sharecount -= decr;
9834194Sdyson	}
9928345Sdyson}
10028345Sdyson
10128345Sdysonstatic int
102140711Sjeffacquire(struct lock **lkpp, int extflags, int wanted)
103140711Sjeff{
104107414Smckusick	struct lock *lkp = *lkpp;
105144589Sjeff	int error;
106112106Sjhb	CTR3(KTR_LOCK,
107132587Srwatson	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
10866615Sjasone	    lkp, extflags, wanted);
10966615Sjasone
110144589Sjeff	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
11128345Sdyson		return EBUSY;
112144589Sjeff	error = 0;
11328345Sdyson	while ((lkp->lk_flags & wanted) != 0) {
114144589Sjeff		CTR2(KTR_LOCK,
115144589Sjeff		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
116144589Sjeff		    lkp, lkp->lk_flags);
11728345Sdyson		lkp->lk_flags |= LK_WAIT_NONZERO;
11828345Sdyson		lkp->lk_waitcount++;
11969432Sjake		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
12088318Sdillon		    lkp->lk_wmesg,
12188318Sdillon		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
122144589Sjeff		lkp->lk_waitcount--;
123144589Sjeff		if (lkp->lk_waitcount == 0)
12428345Sdyson			lkp->lk_flags &= ~LK_WAIT_NONZERO;
125144589Sjeff		if (error)
126144589Sjeff			break;
12728345Sdyson		if (extflags & LK_SLEEPFAIL) {
128144589Sjeff			error = ENOLCK;
129144589Sjeff			break;
13028345Sdyson		}
131107414Smckusick		if (lkp->lk_newlock != NULL) {
132107414Smckusick			mtx_lock(lkp->lk_newlock->lk_interlock);
133107414Smckusick			mtx_unlock(lkp->lk_interlock);
134107414Smckusick			if (lkp->lk_waitcount == 0)
135107414Smckusick				wakeup((void *)(&lkp->lk_newlock));
136107414Smckusick			*lkpp = lkp = lkp->lk_newlock;
137107414Smckusick		}
13828345Sdyson	}
139144589Sjeff	mtx_assert(lkp->lk_interlock, MA_OWNED);
140144589Sjeff	return (error);
14128345Sdyson}
14228345Sdyson
14324269Speter/*
14424269Speter * Set, change, or release a lock.
14524269Speter *
14624269Speter * Shared requests increment the shared count. Exclusive requests set the
14724269Speter * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
14824269Speter * accepted shared locks and shared-to-exclusive upgrades to go away.
14924269Speter */
15024269Speterint
15183366Sjulianlockmgr(lkp, flags, interlkp, td)
15227894Sfsmp	struct lock *lkp;
15324269Speter	u_int flags;
15466615Sjasone	struct mtx *interlkp;
15583366Sjulian	struct thread *td;
15624269Speter{
15724269Speter	int error;
158110414Sjulian	struct thread *thr;
15972227Sjhb	int extflags, lockflags;
16024269Speter
16124269Speter	error = 0;
16283366Sjulian	if (td == NULL)
163110414Sjulian		thr = LK_KERNPROC;
16428393Sdyson	else
165110414Sjulian		thr = td;
16628345Sdyson
167111463Sjeff	if ((flags & LK_INTERNAL) == 0)
168111463Sjeff		mtx_lock(lkp->lk_interlock);
169140711Sjeff	CTR6(KTR_LOCK,
170140711Sjeff	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
171140711Sjeff	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
172140711Sjeff	    lkp->lk_exclusivecount, flags, td);
173148668Sjeff#ifdef DEBUG_LOCKS
174148668Sjeff	{
175148668Sjeff		struct stack stack; /* XXX */
176148668Sjeff		stack_save(&stack);
177149574Spjd		CTRSTACK(KTR_LOCK, &stack, 0, 1);
178148668Sjeff	}
179140711Sjeff#endif
180140711Sjeff
18175740Salfred	if (flags & LK_INTERLOCK) {
18276100Salfred		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
18372200Sbmilekic		mtx_unlock(interlkp);
18475740Salfred	}
18528345Sdyson
186111463Sjeff	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
187111883Sjhb		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
188111883Sjhb		    &lkp->lk_interlock->mtx_object,
189111883Sjhb		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);
190111463Sjeff
19181506Sjhb	if (panicstr != NULL) {
19281506Sjhb		mtx_unlock(lkp->lk_interlock);
19381506Sjhb		return (0);
19481506Sjhb	}
195144372Sjeff	if ((lkp->lk_flags & LK_NOSHARE) &&
196144372Sjeff	    (flags & LK_TYPE_MASK) == LK_SHARED) {
197144372Sjeff		flags &= ~LK_TYPE_MASK;
198144372Sjeff		flags |= LK_EXCLUSIVE;
199144372Sjeff	}
20024269Speter	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
20124269Speter
20224269Speter	switch (flags & LK_TYPE_MASK) {
20324269Speter
20424269Speter	case LK_SHARED:
20544681Sjulian		/*
20644681Sjulian		 * If we are not the exclusive lock holder, we have to block
20744681Sjulian		 * while there is an exclusive lock holder or while an
20844681Sjulian		 * exclusive lock request or upgrade request is in progress.
20944681Sjulian		 *
210130023Stjr		 * However, if TDP_DEADLKTREAT is set, we override exclusive
21144681Sjulian		 * lock requests or upgrade requests ( but not the exclusive
21244681Sjulian		 * lock itself ).
21344681Sjulian		 */
214110414Sjulian		if (lkp->lk_lockholder != thr) {
21572227Sjhb			lockflags = LK_HAVE_EXCL;
216130023Stjr			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
21783420Sjhb				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
218107414Smckusick			error = acquire(&lkp, extflags, lockflags);
21924269Speter			if (error)
22024269Speter				break;
221144082Sjeff			sharelock(td, lkp, 1);
22297540Sjeff#if defined(DEBUG_LOCKS)
223148669Sjeff			stack_save(&lkp->lk_stack);
22497540Sjeff#endif
22524269Speter			break;
22624269Speter		}
22724269Speter		/*
22824269Speter		 * We hold an exclusive lock, so downgrade it to shared.
22924269Speter		 * An alternative would be to fail with EDEADLK.
23024269Speter		 */
231144082Sjeff		sharelock(td, lkp, 1);
232102412Scharnier		/* FALLTHROUGH downgrade */
23324269Speter
23424269Speter	case LK_DOWNGRADE:
235110414Sjulian		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
23675472Salfred			("lockmgr: not holding exclusive lock "
237110414Sjulian			"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
238110414Sjulian			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
239144082Sjeff		sharelock(td, lkp, lkp->lk_exclusivecount);
240144082Sjeff		COUNT(td, -lkp->lk_exclusivecount);
24124269Speter		lkp->lk_exclusivecount = 0;
24224269Speter		lkp->lk_flags &= ~LK_HAVE_EXCL;
243110190Sjulian		lkp->lk_lockholder = LK_NOPROC;
24424269Speter		if (lkp->lk_waitcount)
24524269Speter			wakeup((void *)lkp);
24624269Speter		break;
24724269Speter
24824269Speter	case LK_EXCLUPGRADE:
24924269Speter		/*
25024269Speter		 * If another process is ahead of us to get an upgrade,
25124269Speter		 * then we want to fail rather than have an intervening
25224269Speter		 * exclusive access.
25324269Speter		 */
25424269Speter		if (lkp->lk_flags & LK_WANT_UPGRADE) {
255144082Sjeff			shareunlock(td, lkp, 1);
25624269Speter			error = EBUSY;
25724269Speter			break;
25824269Speter		}
259102412Scharnier		/* FALLTHROUGH normal upgrade */
26024269Speter
26124269Speter	case LK_UPGRADE:
26224269Speter		/*
26324269Speter		 * Upgrade a shared lock to an exclusive one. If another
26424269Speter		 * shared lock has already requested an upgrade to an
26524269Speter		 * exclusive lock, our shared lock is released and an
26624269Speter		 * exclusive lock is requested (which will be granted
26724269Speter		 * after the upgrade). If we return an error, the file
26824269Speter		 * will always be unlocked.
26924269Speter		 */
270144928Sjeff		if (lkp->lk_lockholder == thr)
27124269Speter			panic("lockmgr: upgrade exclusive lock");
272144928Sjeff		if (lkp->lk_sharecount <= 0)
273144928Sjeff			panic("lockmgr: upgrade without shared");
274144082Sjeff		shareunlock(td, lkp, 1);
27524269Speter		/*
27624269Speter		 * If we are just polling, check to see if we will block.
27724269Speter		 */
27824269Speter		if ((extflags & LK_NOWAIT) &&
27924269Speter		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
28024269Speter		     lkp->lk_sharecount > 1)) {
28124269Speter			error = EBUSY;
28224269Speter			break;
28324269Speter		}
28424269Speter		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
28524269Speter			/*
28624269Speter			 * We are first shared lock to request an upgrade, so
28724269Speter			 * request upgrade and wait for the shared count to
28824269Speter			 * drop to zero, then take exclusive lock.
28924269Speter			 */
29024269Speter			lkp->lk_flags |= LK_WANT_UPGRADE;
291107414Smckusick			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
29224269Speter			lkp->lk_flags &= ~LK_WANT_UPGRADE;
29334194Sdyson
294134365Skan			if (error) {
295134365Skan			         if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
296134365Skan			                   wakeup((void *)lkp);
297134365Skan			         break;
298134365Skan			}
299138203Sps			if (lkp->lk_exclusivecount != 0)
300138203Sps				panic("lockmgr: non-zero exclusive count");
30124269Speter			lkp->lk_flags |= LK_HAVE_EXCL;
302110414Sjulian			lkp->lk_lockholder = thr;
30324269Speter			lkp->lk_exclusivecount = 1;
304144082Sjeff			COUNT(td, 1);
30542900Seivind#if defined(DEBUG_LOCKS)
306148669Sjeff			stack_save(&lkp->lk_stack);
30742900Seivind#endif
30824269Speter			break;
30924269Speter		}
31024269Speter		/*
31124269Speter		 * Someone else has requested upgrade. Release our shared
31224269Speter		 * lock, awaken upgrade requestor if we are the last shared
31324269Speter		 * lock, then request an exclusive lock.
31424269Speter		 */
31528345Sdyson		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
31628345Sdyson			LK_WAIT_NONZERO)
31724269Speter			wakeup((void *)lkp);
318102412Scharnier		/* FALLTHROUGH exclusive request */
31924269Speter
32024269Speter	case LK_EXCLUSIVE:
321110414Sjulian		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
32224269Speter			/*
32324269Speter			 *	Recursive lock.
32424269Speter			 */
32548301Smckusick			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
32624269Speter				panic("lockmgr: locking against myself");
32748301Smckusick			if ((extflags & LK_CANRECURSE) != 0) {
32848301Smckusick				lkp->lk_exclusivecount++;
329144082Sjeff				COUNT(td, 1);
33048301Smckusick				break;
33148301Smckusick			}
33224269Speter		}
33324269Speter		/*
33424269Speter		 * If we are just polling, check to see if we will sleep.
33524269Speter		 */
33628345Sdyson		if ((extflags & LK_NOWAIT) &&
33728345Sdyson		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
33824269Speter			error = EBUSY;
33924269Speter			break;
34024269Speter		}
34124269Speter		/*
34224269Speter		 * Try to acquire the want_exclusive flag.
34324269Speter		 */
344134187Skan		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
34524269Speter		if (error)
34624269Speter			break;
34724269Speter		lkp->lk_flags |= LK_WANT_EXCL;
34824269Speter		/*
34924269Speter		 * Wait for shared locks and upgrades to finish.
35024269Speter		 */
351134365Skan		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
35224269Speter		lkp->lk_flags &= ~LK_WANT_EXCL;
353134365Skan		if (error) {
354134365Skan			if (lkp->lk_flags & LK_WAIT_NONZERO)
355134365Skan			         wakeup((void *)lkp);
35624269Speter			break;
357134365Skan		}
35824269Speter		lkp->lk_flags |= LK_HAVE_EXCL;
359110414Sjulian		lkp->lk_lockholder = thr;
36024269Speter		if (lkp->lk_exclusivecount != 0)
36124269Speter			panic("lockmgr: non-zero exclusive count");
36224269Speter		lkp->lk_exclusivecount = 1;
363144082Sjeff		COUNT(td, 1);
36442900Seivind#if defined(DEBUG_LOCKS)
365148669Sjeff		stack_save(&lkp->lk_stack);
36642900Seivind#endif
36724269Speter		break;
36824269Speter
36924269Speter	case LK_RELEASE:
37024269Speter		if (lkp->lk_exclusivecount != 0) {
371110414Sjulian			if (lkp->lk_lockholder != thr &&
37251702Sdillon			    lkp->lk_lockholder != LK_KERNPROC) {
373110414Sjulian				panic("lockmgr: thread %p, not %s %p unlocking",
374110414Sjulian				    thr, "exclusive lock holder",
37524269Speter				    lkp->lk_lockholder);
37651702Sdillon			}
377144082Sjeff			if (lkp->lk_lockholder != LK_KERNPROC)
378144082Sjeff				COUNT(td, -1);
37934194Sdyson			if (lkp->lk_exclusivecount == 1) {
38024269Speter				lkp->lk_flags &= ~LK_HAVE_EXCL;
38124269Speter				lkp->lk_lockholder = LK_NOPROC;
38234194Sdyson				lkp->lk_exclusivecount = 0;
38334194Sdyson			} else {
38434194Sdyson				lkp->lk_exclusivecount--;
38524269Speter			}
38671576Sjasone		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
387144082Sjeff			shareunlock(td, lkp, 1);
388149723Sssouhlal		else  {
389149723Sssouhlal			printf("lockmgr: thread %p unlocking unheld lock\n",
390149723Sssouhlal			    thr);
391149723Sssouhlal			kdb_backtrace();
392149723Sssouhlal		}
393149723Sssouhlal
39428345Sdyson		if (lkp->lk_flags & LK_WAIT_NONZERO)
39524269Speter			wakeup((void *)lkp);
39624269Speter		break;
39724269Speter
39824269Speter	case LK_DRAIN:
39924269Speter		/*
40024269Speter		 * Check that we do not already hold the lock, as it can
40124269Speter		 * never drain if we do. Unfortunately, we have no way to
40224269Speter		 * check for holding a shared lock, but at least we can
40324269Speter		 * check for an exclusive one.
40424269Speter		 */
405110414Sjulian		if (lkp->lk_lockholder == thr)
40624269Speter			panic("lockmgr: draining against myself");
40728345Sdyson
40828345Sdyson		error = acquiredrain(lkp, extflags);
40928345Sdyson		if (error)
41024269Speter			break;
41124269Speter		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
412110414Sjulian		lkp->lk_lockholder = thr;
41324269Speter		lkp->lk_exclusivecount = 1;
414144082Sjeff		COUNT(td, 1);
41542900Seivind#if defined(DEBUG_LOCKS)
416148669Sjeff		stack_save(&lkp->lk_stack);
41742900Seivind#endif
41824269Speter		break;
41924269Speter
42024269Speter	default:
42172200Sbmilekic		mtx_unlock(lkp->lk_interlock);
42224269Speter		panic("lockmgr: unknown locktype request %d",
42324269Speter		    flags & LK_TYPE_MASK);
42424269Speter		/* NOTREACHED */
42524269Speter	}
42628345Sdyson	if ((lkp->lk_flags & LK_WAITDRAIN) &&
42728345Sdyson	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
42828345Sdyson		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
42924269Speter		lkp->lk_flags &= ~LK_WAITDRAIN;
43024269Speter		wakeup((void *)&lkp->lk_flags);
43124269Speter	}
43272200Sbmilekic	mtx_unlock(lkp->lk_interlock);
43324269Speter	return (error);
43424269Speter}
43524269Speter
43629653Sdysonstatic int
43729653Sdysonacquiredrain(struct lock *lkp, int extflags) {
43829653Sdyson	int error;
43929653Sdyson
44029653Sdyson	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
44129653Sdyson		return EBUSY;
44229653Sdyson	}
44329653Sdyson	while (lkp->lk_flags & LK_ALL) {
44429653Sdyson		lkp->lk_flags |= LK_WAITDRAIN;
44569432Sjake		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
44688318Sdillon			lkp->lk_wmesg,
44788318Sdillon			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
44829653Sdyson		if (error)
44929653Sdyson			return error;
45029653Sdyson		if (extflags & LK_SLEEPFAIL) {
45129653Sdyson			return ENOLCK;
45229653Sdyson		}
45329653Sdyson	}
45429653Sdyson	return 0;
45529653Sdyson}
45629653Sdyson
45724269Speter/*
458107414Smckusick * Transfer any waiting processes from one lock to another.
459107414Smckusick */
460107414Smckusickvoid
461107414Smckusicktransferlockers(from, to)
462107414Smckusick	struct lock *from;
463107414Smckusick	struct lock *to;
464107414Smckusick{
465107414Smckusick
466107414Smckusick	KASSERT(from != to, ("lock transfer to self"));
467107414Smckusick	KASSERT((from->lk_flags&LK_WAITDRAIN) == 0, ("transfer draining lock"));
468143621Sjeff
469143621Sjeff	mtx_lock(from->lk_interlock);
470143621Sjeff	if (from->lk_waitcount == 0) {
471143621Sjeff		mtx_unlock(from->lk_interlock);
472107414Smckusick		return;
473143621Sjeff	}
474107414Smckusick	from->lk_newlock = to;
475107414Smckusick	wakeup((void *)from);
476143621Sjeff	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
477143621Sjeff	    "lkxfer", 0);
478107414Smckusick	from->lk_newlock = NULL;
479107414Smckusick	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
480107414Smckusick	KASSERT(from->lk_waitcount == 0, ("active lock"));
481143621Sjeff	mtx_unlock(from->lk_interlock);
482107414Smckusick}
483107414Smckusick
484107414Smckusick
485107414Smckusick/*
48629653Sdyson * Initialize a lock; required before use.
48729653Sdyson */
48829653Sdysonvoid
48929653Sdysonlockinit(lkp, prio, wmesg, timo, flags)
49029653Sdyson	struct lock *lkp;
49129653Sdyson	int prio;
49291698Seivind	const char *wmesg;
49329653Sdyson	int timo;
49429653Sdyson	int flags;
49529653Sdyson{
496112106Sjhb	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
49766615Sjasone	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
49829653Sdyson
499117660Struckman	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
50067046Sjasone	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
50129653Sdyson	lkp->lk_sharecount = 0;
50229653Sdyson	lkp->lk_waitcount = 0;
50329653Sdyson	lkp->lk_exclusivecount = 0;
50429653Sdyson	lkp->lk_prio = prio;
50529653Sdyson	lkp->lk_wmesg = wmesg;
50629653Sdyson	lkp->lk_timo = timo;
50729653Sdyson	lkp->lk_lockholder = LK_NOPROC;
508107414Smckusick	lkp->lk_newlock = NULL;
509105370Smckusick#ifdef DEBUG_LOCKS
510148669Sjeff	stack_zero(&lkp->lk_stack);
511105370Smckusick#endif
51229653Sdyson}
51329653Sdyson
51429653Sdyson/*
51566615Sjasone * Destroy a lock.
51666615Sjasone */
51766615Sjasonevoid
51866615Sjasonelockdestroy(lkp)
51966615Sjasone	struct lock *lkp;
52066615Sjasone{
521112106Sjhb	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
52266615Sjasone	    lkp, lkp->lk_wmesg);
52366615Sjasone}
52466615Sjasone
52566615Sjasone/*
52629653Sdyson * Determine the status of a lock.
52729653Sdyson */
52829653Sdysonint
52983366Sjulianlockstatus(lkp, td)
53029653Sdyson	struct lock *lkp;
53183366Sjulian	struct thread *td;
53229653Sdyson{
53329653Sdyson	int lock_type = 0;
534150646Srwatson	int interlocked;
53529653Sdyson
536150646Srwatson	if (!kdb_active) {
537150646Srwatson		interlocked = 1;
538150646Srwatson		mtx_lock(lkp->lk_interlock);
539150646Srwatson	} else
540150646Srwatson		interlocked = 0;
54154444Seivind	if (lkp->lk_exclusivecount != 0) {
542110414Sjulian		if (td == NULL || lkp->lk_lockholder == td)
54354444Seivind			lock_type = LK_EXCLUSIVE;
54454444Seivind		else
54554444Seivind			lock_type = LK_EXCLOTHER;
54654444Seivind	} else if (lkp->lk_sharecount != 0)
54729653Sdyson		lock_type = LK_SHARED;
548150646Srwatson	if (interlocked)
549150646Srwatson		mtx_unlock(lkp->lk_interlock);
55029653Sdyson	return (lock_type);
55129653Sdyson}
55229653Sdyson
55329653Sdyson/*
55448225Smckusick * Determine the number of holders of a lock.
55548225Smckusick */
55648225Smckusickint
55748225Smckusicklockcount(lkp)
55848225Smckusick	struct lock *lkp;
55948225Smckusick{
56048225Smckusick	int count;
56148225Smckusick
56272200Sbmilekic	mtx_lock(lkp->lk_interlock);
56348225Smckusick	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
56472200Sbmilekic	mtx_unlock(lkp->lk_interlock);
56548225Smckusick	return (count);
56648225Smckusick}
56748225Smckusick
56848225Smckusick/*
56924269Speter * Print out information about state of a lock. Used by VOP_PRINT
57028569Sphk * routines to display status about contained locks.
57124269Speter */
57224271Spetervoid
57324269Speterlockmgr_printinfo(lkp)
57424269Speter	struct lock *lkp;
57524269Speter{
57624269Speter
57724269Speter	if (lkp->lk_sharecount)
57824269Speter		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
57924269Speter		    lkp->lk_sharecount);
58024269Speter	else if (lkp->lk_flags & LK_HAVE_EXCL)
581124163Skan		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
582124163Skan		    lkp->lk_wmesg, lkp->lk_exclusivecount,
583124163Skan		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
58424269Speter	if (lkp->lk_waitcount > 0)
58524269Speter		printf(" with %d pending", lkp->lk_waitcount);
586148669Sjeff#ifdef DEBUG_LOCKS
587148669Sjeff	stack_print(&lkp->lk_stack);
588148669Sjeff#endif
58924269Speter}
590161322Sjhb
591161322Sjhb#ifdef DDB
592161337Sjhb/*
593161337Sjhb * Check to see if a thread that is blocked on a sleep queue is actually
594161337Sjhb * blocked on a 'struct lock'.  If so, output some details and return true.
595161337Sjhb * If the lock has an exclusive owner, return that in *ownerp.
596161337Sjhb */
597161337Sjhbint
598161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp)
599161337Sjhb{
600161337Sjhb	struct lock *lkp;
601161337Sjhb
602161337Sjhb	lkp = td->td_wchan;
603161337Sjhb
604161337Sjhb	/* Simple test to see if wchan points to a lockmgr lock. */
605161337Sjhb	if (lkp->lk_wmesg != td->td_wmesg)
606161337Sjhb		return (0);
607161337Sjhb
608161337Sjhb	/* Ok, we think we have a lockmgr lock, so output some details. */
609161337Sjhb	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
610161337Sjhb	if (lkp->lk_sharecount) {
611161337Sjhb		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
612161337Sjhb		*ownerp = NULL;
613161337Sjhb	} else {
614161337Sjhb		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
615161337Sjhb		*ownerp = lkp->lk_lockholder;
616161337Sjhb	}
617161337Sjhb	return (1);
618161337Sjhb}
619161337Sjhb
620161322SjhbDB_SHOW_COMMAND(lockmgr, db_show_lockmgr)
621161322Sjhb{
622161322Sjhb	struct thread *td;
623161322Sjhb	struct lock *lkp;
624161322Sjhb
625161322Sjhb	if (!have_addr)
626161322Sjhb		return;
627161322Sjhb	lkp = (struct lock *)addr;
628161322Sjhb
629161322Sjhb	db_printf("lock type: %s\n", lkp->lk_wmesg);
630161322Sjhb	db_printf("state: ");
631161322Sjhb	if (lkp->lk_sharecount)
632161322Sjhb		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
633161322Sjhb	else if (lkp->lk_flags & LK_HAVE_EXCL) {
634161322Sjhb		td = lkp->lk_lockholder;
635161322Sjhb		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
636161322Sjhb		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
637161322Sjhb		    td->td_proc->p_pid, td->td_proc->p_comm);
638161322Sjhb	} else
639161322Sjhb		db_printf("UNLOCKED\n");
640161322Sjhb	if (lkp->lk_waitcount > 0)
641161322Sjhb		db_printf("waiters: %d\n", lkp->lk_waitcount);
642161322Sjhb}
643161322Sjhb#endif