/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 167366 2007-03-09 16:19:34Z jhb $");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr
#endif
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
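
/*
 * Illustrative lifecycle sketch (editorial addition, kept in a comment so
 * it is not compiled): a lockmgr lock is initialized once, acquired and
 * released as needed, and destroyed when the protected object goes away.
 * The lock name "example" and the PVFS sleep priority are placeholder
 * choices for the example, not requirements of the API.
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "example", 0, 0);
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL, curthread);
 *	... access the data protected by examplelk ...
 *	lockmgr(&examplelk, LK_RELEASE, NULL, curthread);
 *	lockdestroy(&examplelk);
 */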

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags);

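/*
 * Grant 'incr' additional shared holds on 'lkp' and credit them to 'td'
 * for per-thread lock accounting.  Called with the lock's interlock held.
 */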
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

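/*
 * Release 'decr' shared holds on 'lkp'.  When the last shared hold is
 * dropped, clear LK_SHARE_NONZERO and wake up any thread waiting to
 * upgrade or to acquire the lock exclusively.  Called with the lock's
 * interlock held.
 */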
static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

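/*
 * Sleep until none of the flags in 'wanted' are set on the lock, or fail
 * at once with EBUSY if LK_NOWAIT was requested.  The lock's interlock
 * must be held on entry and is held again on return.  If the lock is
 * being handed over to a replacement (lk_newlock), switch to the new
 * lock and update *lkpp.  Contention is recorded through 'contested' and
 * 'waittime' for lock profiling.
 */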
static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
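
/*
 * Interlock hand-off sketch (editorial illustration; 'obj' and its fields
 * are hypothetical): a caller that has examined state under its own mutex
 * can pass that mutex with LK_INTERLOCK.  lockmgr() drops it only after
 * taking the lock's internal interlock, so no other thread can slip in
 * between the caller's check and the lock request.
 *
 *	mtx_lock(&obj->obj_mtx);
 *	... examine object state under obj_mtx ...
 *	lockmgr(&obj->obj_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &obj->obj_mtx, curthread);
 *	... obj_mtx has been released; obj_lock is now held exclusively ...
 */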
int
_lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
	 struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags &
				    (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

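/*
 * Helper for LK_DRAIN: with the interlock held, wait until the lock is
 * completely idle (no holders, no pending requests, and no waiters).
 * Fails with EBUSY under LK_NOWAIT and with ENOLCK under LK_SLEEPFAIL,
 * mirroring acquire().
 */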
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_profile_object_init(&lkp->lk_object, &lock_class_lockmgr, wmesg);
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_profile_object_destroy(&lkp->lk_object);
}

/*
 * Determine the status of a lock.
 */
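
/*
 * Typical caller pattern (editorial sketch; 'examplelk' is hypothetical):
 * assert that the current thread holds the lock exclusively before
 * touching data that requires the exclusive lock.
 *
 *	KASSERT(lockstatus(&examplelk, curthread) == LK_EXCLUSIVE,
 *	    ("examplelk not exclusively locked"));
 */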
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (lkp->lk_wmesg != td->td_wmesg)
		return (0);

	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf("lock type: %s\n", lkp->lk_wmesg);
	db_printf("state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_proc->p_comm);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf("waiters: %d\n", lkp->lk_waitcount);
}
#endif