/* kern_lock.c, FreeBSD revision 176014 */
1139804Simp/*-
224269Speter * Copyright (c) 1995
324269Speter *	The Regents of the University of California.  All rights reserved.
424269Speter *
528345Sdyson * Copyright (C) 1997
628345Sdyson *	John S. Dyson.  All rights reserved.
728345Sdyson *
824269Speter * This code contains ideas from software contributed to Berkeley by
924269Speter * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
1024269Speter * System project at Carnegie-Mellon University.
1124269Speter *
1224269Speter * Redistribution and use in source and binary forms, with or without
1324269Speter * modification, are permitted provided that the following conditions
1424269Speter * are met:
1524269Speter * 1. Redistributions of source code must retain the above copyright
1624269Speter *    notice, this list of conditions and the following disclaimer.
1724269Speter * 2. Redistributions in binary form must reproduce the above copyright
1824269Speter *    notice, this list of conditions and the following disclaimer in the
1924269Speter *    documentation and/or other materials provided with the distribution.
2024269Speter * 3. All advertising materials mentioning features or use of this software
2124269Speter *    must display the following acknowledgement:
2224269Speter *	This product includes software developed by the University of
2324269Speter *	California, Berkeley and its contributors.
2424269Speter * 4. Neither the name of the University nor the names of its contributors
2524269Speter *    may be used to endorse or promote products derived from this software
2624269Speter *    without specific prior written permission.
2724269Speter *
2824269Speter * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2924269Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3024269Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3124269Speter * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3224269Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3324269Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3424269Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3524269Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3624269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3724269Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3824269Speter * SUCH DAMAGE.
3924269Speter *
4024269Speter *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
4124269Speter */
4224269Speter
43116182Sobrien#include <sys/cdefs.h>
44116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 176014 2008-02-06 00:37:14Z attilio $");
45116182Sobrien
46161322Sjhb#include "opt_ddb.h"
47164159Skmacy#include "opt_global.h"
48161322Sjhb
4924269Speter#include <sys/param.h>
50150807Srwatson#include <sys/kdb.h>
5167046Sjasone#include <sys/kernel.h>
5284812Sjhb#include <sys/ktr.h>
5324269Speter#include <sys/lock.h>
54102477Sbde#include <sys/lockmgr.h>
5567353Sjhb#include <sys/mutex.h>
56102477Sbde#include <sys/proc.h>
5724273Speter#include <sys/systm.h>
58164159Skmacy#include <sys/lock_profile.h>
59148668Sjeff#ifdef DEBUG_LOCKS
60148668Sjeff#include <sys/stack.h>
61148668Sjeff#endif
6224269Speter
63176014Sattilio#define	LOCKMGR_TRYOP(x)	((x) & LK_NOWAIT)
64176014Sattilio#define	LOCKMGR_TRYW(x)		(LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
65176014Sattilio
66173733Sattiliostatic void	assert_lockmgr(struct lock_object *lock, int what);
67161322Sjhb#ifdef DDB
68161322Sjhb#include <ddb/ddb.h>
69164246Skmacystatic void	db_show_lockmgr(struct lock_object *lock);
70161322Sjhb#endif
71167368Sjhbstatic void	lock_lockmgr(struct lock_object *lock, int how);
72167368Sjhbstatic int	unlock_lockmgr(struct lock_object *lock);
73161322Sjhb
74164246Skmacystruct lock_class lock_class_lockmgr = {
75167366Sjhb	.lc_name = "lockmgr",
76167366Sjhb	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
77173733Sattilio	.lc_assert = assert_lockmgr,
78164246Skmacy#ifdef DDB
79167368Sjhb	.lc_ddb_show = db_show_lockmgr,
80164246Skmacy#endif
81167368Sjhb	.lc_lock = lock_lockmgr,
82167368Sjhb	.lc_unlock = unlock_lockmgr,
83164246Skmacy};
84164246Skmacy
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
8924269Speter
/*
 * lock_class assert hook.  lockmgr does not implement lock-state
 * assertions, so reaching this function is always a caller bug.
 */
void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
96173733Sattilio
/*
 * lock_class acquire hook used by sleep interlocking; lockmgr locks
 * cannot be used as sleep interlocks, so this always panics.
 */
void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}
103167368Sjhb
/*
 * lock_class release hook counterpart of lock_lockmgr(); also
 * unsupported for lockmgr locks and always panics.
 */
int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
110167368Sjhb
111175166Sattilio#define	COUNT(td, x)	((td)->td_locks += (x))
11229653Sdyson#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
11329653Sdyson	LK_SHARE_NONZERO | LK_WAIT_NONZERO)
11429653Sdyson
115167012Skmacystatic int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
11629653Sdysonstatic int acquiredrain(struct lock *lkp, int extflags) ;
11724269Speter
118144705Sjeffstatic __inline void
119144082Sjeffsharelock(struct thread *td, struct lock *lkp, int incr) {
12028345Sdyson	lkp->lk_flags |= LK_SHARE_NONZERO;
12128345Sdyson	lkp->lk_sharecount += incr;
122144082Sjeff	COUNT(td, incr);
12328345Sdyson}
12424269Speter
125144705Sjeffstatic __inline void
126144082Sjeffshareunlock(struct thread *td, struct lock *lkp, int decr) {
12742453Seivind
12842408Seivind	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
12924269Speter
130144082Sjeff	COUNT(td, -decr);
13134194Sdyson	if (lkp->lk_sharecount == decr) {
13228345Sdyson		lkp->lk_flags &= ~LK_SHARE_NONZERO;
13334194Sdyson		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
13434194Sdyson			wakeup(lkp);
13534194Sdyson		}
13634194Sdyson		lkp->lk_sharecount = 0;
13734194Sdyson	} else {
13834194Sdyson		lkp->lk_sharecount -= decr;
13934194Sdyson	}
14028345Sdyson}
14128345Sdyson
14228345Sdysonstatic int
143167012Skmacyacquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
144140711Sjeff{
145107414Smckusick	struct lock *lkp = *lkpp;
146144589Sjeff	int error;
147112106Sjhb	CTR3(KTR_LOCK,
148132587Srwatson	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
14966615Sjasone	    lkp, extflags, wanted);
15066615Sjasone
151144589Sjeff	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
15228345Sdyson		return EBUSY;
153144589Sjeff	error = 0;
154167012Skmacy	if ((lkp->lk_flags & wanted) != 0)
155167012Skmacy		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
156167012Skmacy
15728345Sdyson	while ((lkp->lk_flags & wanted) != 0) {
158144589Sjeff		CTR2(KTR_LOCK,
159144589Sjeff		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
160144589Sjeff		    lkp, lkp->lk_flags);
16128345Sdyson		lkp->lk_flags |= LK_WAIT_NONZERO;
16228345Sdyson		lkp->lk_waitcount++;
16369432Sjake		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
16488318Sdillon		    lkp->lk_wmesg,
16588318Sdillon		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
166144589Sjeff		lkp->lk_waitcount--;
167144589Sjeff		if (lkp->lk_waitcount == 0)
16828345Sdyson			lkp->lk_flags &= ~LK_WAIT_NONZERO;
169144589Sjeff		if (error)
170144589Sjeff			break;
17128345Sdyson		if (extflags & LK_SLEEPFAIL) {
172144589Sjeff			error = ENOLCK;
173144589Sjeff			break;
17428345Sdyson		}
175107414Smckusick		if (lkp->lk_newlock != NULL) {
176107414Smckusick			mtx_lock(lkp->lk_newlock->lk_interlock);
177107414Smckusick			mtx_unlock(lkp->lk_interlock);
178107414Smckusick			if (lkp->lk_waitcount == 0)
179107414Smckusick				wakeup((void *)(&lkp->lk_newlock));
180107414Smckusick			*lkpp = lkp = lkp->lk_newlock;
181107414Smckusick		}
18228345Sdyson	}
183144589Sjeff	mtx_assert(lkp->lk_interlock, MA_OWNED);
184144589Sjeff	return (error);
18528345Sdyson}
18628345Sdyson
18724269Speter/*
18824269Speter * Set, change, or release a lock.
18924269Speter *
19024269Speter * Shared requests increment the shared count. Exclusive requests set the
19124269Speter * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
19224269Speter * accepted shared locks and shared-to-exclusive upgrades to go away.
19324269Speter */
19424269Speterint
195175635Sattilio_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
196175635Sattilio    int line)
197164159Skmacy
19824269Speter{
199175635Sattilio	struct thread *td;
20024269Speter	int error;
20172227Sjhb	int extflags, lockflags;
202167012Skmacy	int contested = 0;
203167012Skmacy	uint64_t waitstart = 0;
204174948Sattilio
20524269Speter	error = 0;
206175635Sattilio	td = curthread;
20728345Sdyson
208111463Sjeff	if ((flags & LK_INTERNAL) == 0)
209111463Sjeff		mtx_lock(lkp->lk_interlock);
210140711Sjeff	CTR6(KTR_LOCK,
211140711Sjeff	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
212140711Sjeff	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
213140711Sjeff	    lkp->lk_exclusivecount, flags, td);
214148668Sjeff#ifdef DEBUG_LOCKS
215148668Sjeff	{
216148668Sjeff		struct stack stack; /* XXX */
217148668Sjeff		stack_save(&stack);
218149574Spjd		CTRSTACK(KTR_LOCK, &stack, 0, 1);
219148668Sjeff	}
220140711Sjeff#endif
221140711Sjeff
22275740Salfred	if (flags & LK_INTERLOCK) {
22376100Salfred		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
22472200Sbmilekic		mtx_unlock(interlkp);
22575740Salfred	}
22628345Sdyson
227111463Sjeff	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
228111883Sjhb		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
229167787Sjhb		    &lkp->lk_interlock->lock_object,
230111883Sjhb		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);
231111463Sjeff
23281506Sjhb	if (panicstr != NULL) {
23381506Sjhb		mtx_unlock(lkp->lk_interlock);
23481506Sjhb		return (0);
23581506Sjhb	}
236144372Sjeff	if ((lkp->lk_flags & LK_NOSHARE) &&
237144372Sjeff	    (flags & LK_TYPE_MASK) == LK_SHARED) {
238144372Sjeff		flags &= ~LK_TYPE_MASK;
239144372Sjeff		flags |= LK_EXCLUSIVE;
240144372Sjeff	}
24124269Speter	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
24224269Speter
24324269Speter	switch (flags & LK_TYPE_MASK) {
24424269Speter
24524269Speter	case LK_SHARED:
246176014Sattilio		if (!LOCKMGR_TRYOP(extflags))
247176014Sattilio			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file,
248176014Sattilio			    line);
24944681Sjulian		/*
25044681Sjulian		 * If we are not the exclusive lock holder, we have to block
25144681Sjulian		 * while there is an exclusive lock holder or while an
25244681Sjulian		 * exclusive lock request or upgrade request is in progress.
25344681Sjulian		 *
254130023Stjr		 * However, if TDP_DEADLKTREAT is set, we override exclusive
25544681Sjulian		 * lock requests or upgrade requests ( but not the exclusive
25644681Sjulian		 * lock itself ).
25744681Sjulian		 */
258175166Sattilio		if (lkp->lk_lockholder != td) {
25972227Sjhb			lockflags = LK_HAVE_EXCL;
260130023Stjr			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
26183420Sjhb				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
262167012Skmacy			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
26324269Speter			if (error)
26424269Speter				break;
265144082Sjeff			sharelock(td, lkp, 1);
266164159Skmacy			if (lkp->lk_sharecount == 1)
267167012Skmacy				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
268176014Sattilio			WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags),
269176014Sattilio			    file, line);
270164159Skmacy
27197540Sjeff#if defined(DEBUG_LOCKS)
272148669Sjeff			stack_save(&lkp->lk_stack);
27397540Sjeff#endif
27424269Speter			break;
27524269Speter		}
27624269Speter		/*
27724269Speter		 * We hold an exclusive lock, so downgrade it to shared.
27824269Speter		 * An alternative would be to fail with EDEADLK.
27924269Speter		 */
280102412Scharnier		/* FALLTHROUGH downgrade */
28124269Speter
28224269Speter	case LK_DOWNGRADE:
283175166Sattilio		KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
28475472Salfred			("lockmgr: not holding exclusive lock "
285110414Sjulian			"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
286175166Sattilio			lkp->lk_lockholder, td, lkp->lk_exclusivecount));
287144082Sjeff		sharelock(td, lkp, lkp->lk_exclusivecount);
288176014Sattilio		WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
289144082Sjeff		COUNT(td, -lkp->lk_exclusivecount);
29024269Speter		lkp->lk_exclusivecount = 0;
29124269Speter		lkp->lk_flags &= ~LK_HAVE_EXCL;
292110190Sjulian		lkp->lk_lockholder = LK_NOPROC;
29324269Speter		if (lkp->lk_waitcount)
29424269Speter			wakeup((void *)lkp);
29524269Speter		break;
29624269Speter
29724269Speter	case LK_UPGRADE:
29824269Speter		/*
29924269Speter		 * Upgrade a shared lock to an exclusive one. If another
30024269Speter		 * shared lock has already requested an upgrade to an
30124269Speter		 * exclusive lock, our shared lock is released and an
30224269Speter		 * exclusive lock is requested (which will be granted
30324269Speter		 * after the upgrade). If we return an error, the file
30424269Speter		 * will always be unlocked.
30524269Speter		 */
306175166Sattilio		if (lkp->lk_lockholder == td)
30724269Speter			panic("lockmgr: upgrade exclusive lock");
308144928Sjeff		if (lkp->lk_sharecount <= 0)
309144928Sjeff			panic("lockmgr: upgrade without shared");
310144082Sjeff		shareunlock(td, lkp, 1);
311164159Skmacy		if (lkp->lk_sharecount == 0)
312164159Skmacy			lock_profile_release_lock(&lkp->lk_object);
31324269Speter		/*
31424269Speter		 * If we are just polling, check to see if we will block.
31524269Speter		 */
31624269Speter		if ((extflags & LK_NOWAIT) &&
31724269Speter		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
31824269Speter		     lkp->lk_sharecount > 1)) {
31924269Speter			error = EBUSY;
320176014Sattilio			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
32124269Speter			break;
32224269Speter		}
32324269Speter		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
32424269Speter			/*
32524269Speter			 * We are first shared lock to request an upgrade, so
32624269Speter			 * request upgrade and wait for the shared count to
32724269Speter			 * drop to zero, then take exclusive lock.
32824269Speter			 */
32924269Speter			lkp->lk_flags |= LK_WANT_UPGRADE;
330167012Skmacy			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
33124269Speter			lkp->lk_flags &= ~LK_WANT_UPGRADE;
33234194Sdyson
333134365Skan			if (error) {
334134365Skan			         if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
335134365Skan			                   wakeup((void *)lkp);
336176014Sattilio				WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
337134365Skan			         break;
338134365Skan			}
339138203Sps			if (lkp->lk_exclusivecount != 0)
340138203Sps				panic("lockmgr: non-zero exclusive count");
34124269Speter			lkp->lk_flags |= LK_HAVE_EXCL;
342175166Sattilio			lkp->lk_lockholder = td;
34324269Speter			lkp->lk_exclusivecount = 1;
344176014Sattilio			WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE |
345176014Sattilio			    LOP_TRYLOCK, file, line);
346144082Sjeff			COUNT(td, 1);
347167012Skmacy			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
34842900Seivind#if defined(DEBUG_LOCKS)
349148669Sjeff			stack_save(&lkp->lk_stack);
35042900Seivind#endif
35124269Speter			break;
35224269Speter		}
35324269Speter		/*
35424269Speter		 * Someone else has requested upgrade. Release our shared
35524269Speter		 * lock, awaken upgrade requestor if we are the last shared
35624269Speter		 * lock, then request an exclusive lock.
35724269Speter		 */
358176014Sattilio		WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
35928345Sdyson		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
36028345Sdyson			LK_WAIT_NONZERO)
36124269Speter			wakeup((void *)lkp);
362102412Scharnier		/* FALLTHROUGH exclusive request */
36324269Speter
36424269Speter	case LK_EXCLUSIVE:
365176014Sattilio		if (!LOCKMGR_TRYOP(extflags))
366176014Sattilio			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
367176014Sattilio			    LOP_EXCLUSIVE, file, line);
368175166Sattilio		if (lkp->lk_lockholder == td) {
36924269Speter			/*
37024269Speter			 *	Recursive lock.
37124269Speter			 */
37248301Smckusick			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
37324269Speter				panic("lockmgr: locking against myself");
37448301Smckusick			if ((extflags & LK_CANRECURSE) != 0) {
37548301Smckusick				lkp->lk_exclusivecount++;
376176014Sattilio				WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
377176014Sattilio				    LOCKMGR_TRYW(extflags), file, line);
378144082Sjeff				COUNT(td, 1);
37948301Smckusick				break;
38048301Smckusick			}
38124269Speter		}
38224269Speter		/*
38324269Speter		 * If we are just polling, check to see if we will sleep.
38424269Speter		 */
38528345Sdyson		if ((extflags & LK_NOWAIT) &&
38628345Sdyson		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
38724269Speter			error = EBUSY;
38824269Speter			break;
38924269Speter		}
39024269Speter		/*
39124269Speter		 * Try to acquire the want_exclusive flag.
39224269Speter		 */
393167012Skmacy		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
39424269Speter		if (error)
39524269Speter			break;
39624269Speter		lkp->lk_flags |= LK_WANT_EXCL;
39724269Speter		/*
39824269Speter		 * Wait for shared locks and upgrades to finish.
39924269Speter		 */
400167012Skmacy		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
40124269Speter		lkp->lk_flags &= ~LK_WANT_EXCL;
402134365Skan		if (error) {
403134365Skan			if (lkp->lk_flags & LK_WAIT_NONZERO)
404134365Skan			         wakeup((void *)lkp);
40524269Speter			break;
406134365Skan		}
40724269Speter		lkp->lk_flags |= LK_HAVE_EXCL;
408175166Sattilio		lkp->lk_lockholder = td;
40924269Speter		if (lkp->lk_exclusivecount != 0)
41024269Speter			panic("lockmgr: non-zero exclusive count");
41124269Speter		lkp->lk_exclusivecount = 1;
412176014Sattilio		WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
413176014Sattilio		    LOCKMGR_TRYW(extflags), file, line);
414144082Sjeff		COUNT(td, 1);
415167012Skmacy		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
41642900Seivind#if defined(DEBUG_LOCKS)
417148669Sjeff		stack_save(&lkp->lk_stack);
41842900Seivind#endif
41924269Speter		break;
42024269Speter
42124269Speter	case LK_RELEASE:
42224269Speter		if (lkp->lk_exclusivecount != 0) {
423175166Sattilio			if (lkp->lk_lockholder != td &&
42451702Sdillon			    lkp->lk_lockholder != LK_KERNPROC) {
425110414Sjulian				panic("lockmgr: thread %p, not %s %p unlocking",
426175166Sattilio				    td, "exclusive lock holder",
42724269Speter				    lkp->lk_lockholder);
42851702Sdillon			}
429176014Sattilio			if (lkp->lk_lockholder != LK_KERNPROC) {
430176014Sattilio				WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
431176014Sattilio				    file, line);
432144082Sjeff				COUNT(td, -1);
433176014Sattilio			}
43434194Sdyson			if (lkp->lk_exclusivecount == 1) {
43524269Speter				lkp->lk_flags &= ~LK_HAVE_EXCL;
43624269Speter				lkp->lk_lockholder = LK_NOPROC;
43734194Sdyson				lkp->lk_exclusivecount = 0;
438164159Skmacy				lock_profile_release_lock(&lkp->lk_object);
43934194Sdyson			} else {
44034194Sdyson				lkp->lk_exclusivecount--;
44124269Speter			}
442176014Sattilio		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
443176014Sattilio			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
444144082Sjeff			shareunlock(td, lkp, 1);
445176014Sattilio		} else  {
446149723Sssouhlal			printf("lockmgr: thread %p unlocking unheld lock\n",
447175166Sattilio			    td);
448149723Sssouhlal			kdb_backtrace();
449149723Sssouhlal		}
450149723Sssouhlal
45128345Sdyson		if (lkp->lk_flags & LK_WAIT_NONZERO)
45224269Speter			wakeup((void *)lkp);
45324269Speter		break;
45424269Speter
45524269Speter	case LK_DRAIN:
45624269Speter		/*
45724269Speter		 * Check that we do not already hold the lock, as it can
45824269Speter		 * never drain if we do. Unfortunately, we have no way to
45924269Speter		 * check for holding a shared lock, but at least we can
46024269Speter		 * check for an exclusive one.
46124269Speter		 */
462176014Sattilio		if (!LOCKMGR_TRYOP(extflags))
463176014Sattilio			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
464176014Sattilio			    LOP_EXCLUSIVE, file, line);
465175166Sattilio		if (lkp->lk_lockholder == td)
46624269Speter			panic("lockmgr: draining against myself");
46728345Sdyson
46828345Sdyson		error = acquiredrain(lkp, extflags);
46928345Sdyson		if (error)
47024269Speter			break;
47124269Speter		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
472175166Sattilio		lkp->lk_lockholder = td;
47324269Speter		lkp->lk_exclusivecount = 1;
474176014Sattilio		WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
475176014Sattilio		    LOCKMGR_TRYW(extflags), file, line);
476144082Sjeff		COUNT(td, 1);
47742900Seivind#if defined(DEBUG_LOCKS)
478148669Sjeff		stack_save(&lkp->lk_stack);
47942900Seivind#endif
48024269Speter		break;
48124269Speter
48224269Speter	default:
48372200Sbmilekic		mtx_unlock(lkp->lk_interlock);
48424269Speter		panic("lockmgr: unknown locktype request %d",
48524269Speter		    flags & LK_TYPE_MASK);
48624269Speter		/* NOTREACHED */
48724269Speter	}
48828345Sdyson	if ((lkp->lk_flags & LK_WAITDRAIN) &&
48928345Sdyson	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
49028345Sdyson		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
49124269Speter		lkp->lk_flags &= ~LK_WAITDRAIN;
49224269Speter		wakeup((void *)&lkp->lk_flags);
49324269Speter	}
49472200Sbmilekic	mtx_unlock(lkp->lk_interlock);
49524269Speter	return (error);
49624269Speter}
49724269Speter
49829653Sdysonstatic int
49929653Sdysonacquiredrain(struct lock *lkp, int extflags) {
50029653Sdyson	int error;
50129653Sdyson
50229653Sdyson	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
50329653Sdyson		return EBUSY;
50429653Sdyson	}
50529653Sdyson	while (lkp->lk_flags & LK_ALL) {
50629653Sdyson		lkp->lk_flags |= LK_WAITDRAIN;
50769432Sjake		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
50888318Sdillon			lkp->lk_wmesg,
50988318Sdillon			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
51029653Sdyson		if (error)
51129653Sdyson			return error;
51229653Sdyson		if (extflags & LK_SLEEPFAIL) {
51329653Sdyson			return ENOLCK;
51429653Sdyson		}
51529653Sdyson	}
51629653Sdyson	return 0;
51729653Sdyson}
51829653Sdyson
51924269Speter/*
52029653Sdyson * Initialize a lock; required before use.
52129653Sdyson */
52229653Sdysonvoid
52329653Sdysonlockinit(lkp, prio, wmesg, timo, flags)
52429653Sdyson	struct lock *lkp;
52529653Sdyson	int prio;
52691698Seivind	const char *wmesg;
52729653Sdyson	int timo;
52829653Sdyson	int flags;
52929653Sdyson{
530176014Sattilio	int iflags;
531176014Sattilio
532112106Sjhb	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
53366615Sjasone	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
53429653Sdyson
535117660Struckman	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
536176014Sattilio	lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_NOWITNESS | LK_NODUP);
53729653Sdyson	lkp->lk_sharecount = 0;
53829653Sdyson	lkp->lk_waitcount = 0;
53929653Sdyson	lkp->lk_exclusivecount = 0;
54029653Sdyson	lkp->lk_prio = prio;
54129653Sdyson	lkp->lk_timo = timo;
54229653Sdyson	lkp->lk_lockholder = LK_NOPROC;
543107414Smckusick	lkp->lk_newlock = NULL;
544176014Sattilio	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
545176014Sattilio	if (!(flags & LK_NODUP))
546176014Sattilio		iflags |= LO_DUPOK;
547176014Sattilio	if (!(flags & LK_NOWITNESS))
548176014Sattilio		iflags |= LO_WITNESS;
549105370Smckusick#ifdef DEBUG_LOCKS
550148669Sjeff	stack_zero(&lkp->lk_stack);
551105370Smckusick#endif
552176014Sattilio	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags);
55329653Sdyson}
55429653Sdyson
55529653Sdyson/*
55666615Sjasone * Destroy a lock.
55766615Sjasone */
55866615Sjasonevoid
55966615Sjasonelockdestroy(lkp)
56066615Sjasone	struct lock *lkp;
56166615Sjasone{
562169675Sjhb
563112106Sjhb	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
56466615Sjasone	    lkp, lkp->lk_wmesg);
565168070Sjhb	lock_destroy(&lkp->lk_object);
56666615Sjasone}
56766615Sjasone
56866615Sjasone/*
569175166Sattilio * Disown the lockmgr.
570175166Sattilio */
571175166Sattiliovoid
572176014Sattilio_lockmgr_disown(struct lock *lkp, const char *file, int line)
573175166Sattilio{
574175166Sattilio	struct thread *td;
575175166Sattilio
576175166Sattilio	td = curthread;
577175229Sattilio	KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
578175166Sattilio	    ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
579175229Sattilio	KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
580175229Sattilio	    lkp->lk_lockholder == LK_KERNPROC,
581175166Sattilio	    ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
582175166Sattilio	    td));
583175166Sattilio
584175166Sattilio	/*
585175166Sattilio	 * Drop the lock reference and switch the owner.  This will result
586175166Sattilio	 * in an atomic operation like td_lock is only accessed by curthread
587175229Sattilio	 * and lk_lockholder only needs one write.  Note also that the lock
588175229Sattilio	 * owner can be alredy KERNPROC, so in that case just skip the
589175229Sattilio	 * decrement.
590175166Sattilio	 */
591176014Sattilio	if (lkp->lk_lockholder == td) {
592176014Sattilio		WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line);
593175166Sattilio		td->td_locks--;
594176014Sattilio	}
595175166Sattilio	lkp->lk_lockholder = LK_KERNPROC;
596175166Sattilio}
597175166Sattilio
598175166Sattilio/*
59929653Sdyson * Determine the status of a lock.
60029653Sdyson */
60129653Sdysonint
60283366Sjulianlockstatus(lkp, td)
60329653Sdyson	struct lock *lkp;
60483366Sjulian	struct thread *td;
60529653Sdyson{
60629653Sdyson	int lock_type = 0;
607150646Srwatson	int interlocked;
60829653Sdyson
609175635Sattilio	KASSERT(td == NULL || td == curthread,
610175635Sattilio	    ("%s: thread passed argument (%p) is not valid", __func__, td));
611175635Sattilio
612150646Srwatson	if (!kdb_active) {
613150646Srwatson		interlocked = 1;
614150646Srwatson		mtx_lock(lkp->lk_interlock);
615150646Srwatson	} else
616150646Srwatson		interlocked = 0;
61754444Seivind	if (lkp->lk_exclusivecount != 0) {
618110414Sjulian		if (td == NULL || lkp->lk_lockholder == td)
61954444Seivind			lock_type = LK_EXCLUSIVE;
62054444Seivind		else
62154444Seivind			lock_type = LK_EXCLOTHER;
62254444Seivind	} else if (lkp->lk_sharecount != 0)
62329653Sdyson		lock_type = LK_SHARED;
624150646Srwatson	if (interlocked)
625150646Srwatson		mtx_unlock(lkp->lk_interlock);
62629653Sdyson	return (lock_type);
62729653Sdyson}
62829653Sdyson
62929653Sdyson/*
630162941Stegge * Determine the number of waiters on a lock.
631162941Stegge */
632162941Steggeint
633162941Steggelockwaiters(lkp)
634162941Stegge	struct lock *lkp;
635162941Stegge{
636162941Stegge	int count;
637162941Stegge
638162941Stegge	mtx_lock(lkp->lk_interlock);
639162941Stegge	count = lkp->lk_waitcount;
640162941Stegge	mtx_unlock(lkp->lk_interlock);
641162941Stegge	return (count);
642162941Stegge}
643162941Stegge
644162941Stegge/*
64524269Speter * Print out information about state of a lock. Used by VOP_PRINT
64628569Sphk * routines to display status about contained locks.
64724269Speter */
64824271Spetervoid
64924269Speterlockmgr_printinfo(lkp)
65024269Speter	struct lock *lkp;
65124269Speter{
65224269Speter
65324269Speter	if (lkp->lk_sharecount)
65424269Speter		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
65524269Speter		    lkp->lk_sharecount);
65624269Speter	else if (lkp->lk_flags & LK_HAVE_EXCL)
657124163Skan		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
658124163Skan		    lkp->lk_wmesg, lkp->lk_exclusivecount,
659124163Skan		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
66024269Speter	if (lkp->lk_waitcount > 0)
66124269Speter		printf(" with %d pending", lkp->lk_waitcount);
662148669Sjeff#ifdef DEBUG_LOCKS
663174137Srwatson	stack_print_ddb(&lkp->lk_stack);
664148669Sjeff#endif
66524269Speter}
666161322Sjhb
667161322Sjhb#ifdef DDB
668161337Sjhb/*
669161337Sjhb * Check to see if a thread that is blocked on a sleep queue is actually
670161337Sjhb * blocked on a 'struct lock'.  If so, output some details and return true.
671161337Sjhb * If the lock has an exclusive owner, return that in *ownerp.
672161337Sjhb */
673161337Sjhbint
674161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp)
675161337Sjhb{
676161337Sjhb	struct lock *lkp;
677161337Sjhb
678161337Sjhb	lkp = td->td_wchan;
679161337Sjhb
680161337Sjhb	/* Simple test to see if wchan points to a lockmgr lock. */
681168070Sjhb	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
682168070Sjhb	    lkp->lk_wmesg == td->td_wmesg)
683167782Sjhb		goto ok;
684161337Sjhb
685167782Sjhb	/*
686167782Sjhb	 * If this thread is doing a DRAIN, then it would be asleep on
687167782Sjhb	 * &lkp->lk_flags rather than lkp.
688167782Sjhb	 */
689167782Sjhb	lkp = (struct lock *)((char *)td->td_wchan -
690167782Sjhb	    offsetof(struct lock, lk_flags));
691168070Sjhb	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
692168070Sjhb	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
693167782Sjhb		goto ok;
694167782Sjhb
695167782Sjhb	/* Doen't seem to be a lockmgr lock. */
696167782Sjhb	return (0);
697167782Sjhb
698167782Sjhbok:
699161337Sjhb	/* Ok, we think we have a lockmgr lock, so output some details. */
700161337Sjhb	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
701161337Sjhb	if (lkp->lk_sharecount) {
702161337Sjhb		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
703161337Sjhb		*ownerp = NULL;
704161337Sjhb	} else {
705161337Sjhb		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
706161337Sjhb		*ownerp = lkp->lk_lockholder;
707161337Sjhb	}
708161337Sjhb	return (1);
709161337Sjhb}
710161337Sjhb
711164246Skmacyvoid
712164246Skmacydb_show_lockmgr(struct lock_object *lock)
713161322Sjhb{
714161322Sjhb	struct thread *td;
715161322Sjhb	struct lock *lkp;
716161322Sjhb
717164246Skmacy	lkp = (struct lock *)lock;
718161322Sjhb
719168070Sjhb	db_printf(" lock type: %s\n", lkp->lk_wmesg);
720168070Sjhb	db_printf(" state: ");
721161322Sjhb	if (lkp->lk_sharecount)
722161322Sjhb		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
723161322Sjhb	else if (lkp->lk_flags & LK_HAVE_EXCL) {
724161322Sjhb		td = lkp->lk_lockholder;
725161322Sjhb		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
726161322Sjhb		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
727173600Sjulian		    td->td_proc->p_pid, td->td_name);
728161322Sjhb	} else
729161322Sjhb		db_printf("UNLOCKED\n");
730161322Sjhb	if (lkp->lk_waitcount > 0)
731168070Sjhb		db_printf(" waiters: %d\n", lkp->lk_waitcount);
732161322Sjhb}
733161322Sjhb#endif
734