/* kern_lock.c — FreeBSD sys/kern/kern_lock.c, revision 175635 */
1139804Simp/*-
224269Speter * Copyright (c) 1995
324269Speter *	The Regents of the University of California.  All rights reserved.
424269Speter *
528345Sdyson * Copyright (C) 1997
628345Sdyson *	John S. Dyson.  All rights reserved.
728345Sdyson *
824269Speter * This code contains ideas from software contributed to Berkeley by
924269Speter * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
1024269Speter * System project at Carnegie-Mellon University.
1124269Speter *
1224269Speter * Redistribution and use in source and binary forms, with or without
1324269Speter * modification, are permitted provided that the following conditions
1424269Speter * are met:
1524269Speter * 1. Redistributions of source code must retain the above copyright
1624269Speter *    notice, this list of conditions and the following disclaimer.
1724269Speter * 2. Redistributions in binary form must reproduce the above copyright
1824269Speter *    notice, this list of conditions and the following disclaimer in the
1924269Speter *    documentation and/or other materials provided with the distribution.
2024269Speter * 3. All advertising materials mentioning features or use of this software
2124269Speter *    must display the following acknowledgement:
2224269Speter *	This product includes software developed by the University of
2324269Speter *	California, Berkeley and its contributors.
2424269Speter * 4. Neither the name of the University nor the names of its contributors
2524269Speter *    may be used to endorse or promote products derived from this software
2624269Speter *    without specific prior written permission.
2724269Speter *
2824269Speter * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2924269Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3024269Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3124269Speter * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3224269Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3324269Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3424269Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3524269Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3624269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3724269Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3824269Speter * SUCH DAMAGE.
3924269Speter *
4024269Speter *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
4124269Speter */
4224269Speter
43116182Sobrien#include <sys/cdefs.h>
44116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 175635 2008-01-24 12:34:30Z attilio $");
45116182Sobrien
46161322Sjhb#include "opt_ddb.h"
47164159Skmacy#include "opt_global.h"
48161322Sjhb
4924269Speter#include <sys/param.h>
50150807Srwatson#include <sys/kdb.h>
5167046Sjasone#include <sys/kernel.h>
5284812Sjhb#include <sys/ktr.h>
5324269Speter#include <sys/lock.h>
54102477Sbde#include <sys/lockmgr.h>
5567353Sjhb#include <sys/mutex.h>
56102477Sbde#include <sys/proc.h>
5724273Speter#include <sys/systm.h>
58164159Skmacy#include <sys/lock_profile.h>
59148668Sjeff#ifdef DEBUG_LOCKS
60148668Sjeff#include <sys/stack.h>
61148668Sjeff#endif
6224269Speter
63173733Sattiliostatic void	assert_lockmgr(struct lock_object *lock, int what);
64161322Sjhb#ifdef DDB
65161322Sjhb#include <ddb/ddb.h>
66164246Skmacystatic void	db_show_lockmgr(struct lock_object *lock);
67161322Sjhb#endif
68167368Sjhbstatic void	lock_lockmgr(struct lock_object *lock, int how);
69167368Sjhbstatic int	unlock_lockmgr(struct lock_object *lock);
70161322Sjhb
/*
 * Lock class descriptor for lockmgr locks: a sleepable lock that is
 * recursable and upgradable.  The lc_assert/lc_lock/lc_unlock methods
 * all panic, since lockmgr locks support neither the generic assertion
 * interface nor use as a sleep(9) interlock.
 */
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};
81164246Skmacy
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
8624269Speter
/*
 * Lock class stub: lockmgr locks do not implement the generic lock
 * assertion interface, so any call here is a bug in the caller.
 * Marked static to match its forward declaration above.
 */
static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
93173733Sattilio
/*
 * Lock class stub: lockmgr locks cannot be used as an interlock for
 * sleep(9), so any call here is a bug in the caller.
 * Marked static to match its forward declaration above.
 */
static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}
100167368Sjhb
/*
 * Lock class stub: lockmgr locks cannot be used as an interlock for
 * sleep(9), so any call here is a bug in the caller.  Never returns
 * (panic(9) does not return), hence no return statement is needed.
 * Marked static to match its forward declaration above.
 */
static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
107167368Sjhb
/* Adjust the per-thread count of lockmgr locks held by 'x' (may be negative). */
#define	COUNT(td, x)	((td)->td_locks += (x))
/* Union of all flag bits that must clear before an LK_DRAIN request succeeds. */
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags) ;
11424269Speter
/*
 * Grant 'incr' shared references on 'lkp' to thread 'td', marking the
 * lock as share-held and charging the references to td's lock count.
 * Called with the lock's interlock held.
 */
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}
12124269Speter
/*
 * Drop 'decr' shared references on 'lkp'.  If this releases the last
 * shared reference, clear LK_SHARE_NONZERO and wake up any thread that
 * is waiting to take or upgrade to an exclusive lock.  Called with the
 * lock's interlock held.
 */
static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		/* Last shared reference: clear the flag before waking waiters. */
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}
13828345Sdyson
/*
 * Sleep, with the interlock held, until none of the flag bits in
 * 'wanted' remain set in lkp->lk_flags.  Returns 0 on success, EBUSY
 * immediately for LK_NOWAIT requests that would block, ENOLCK for
 * LK_SLEEPFAIL requests that slept, or the msleep(9) error.  On return
 * the (possibly new, see lk_newlock below) interlock is still held.
 * Contention is recorded in *contested/*waittime for lock profiling.
 */
static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	/* Record the contention for lock profiling before sleeping. */
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		/* msleep() drops and reacquires the interlock around the sleep. */
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		/*
		 * The lock may have been handed over to a new backing lock
		 * (lk_newlock) while we slept; follow it, swap interlocks,
		 * and notify the sleeper on &lk_newlock once the old lock's
		 * waiters have drained.
		 */
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}
18328345Sdyson
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * 'flags' selects the request type (LK_TYPE_MASK) plus external flags.
 * If LK_INTERLOCK is set, 'interlkp' is a mutex held by the caller that
 * is released once this lock's own interlock has been acquired.  'file'
 * and 'line' identify the call site for lock profiling.  Returns 0 on
 * success or an errno value (EBUSY, ENOLCK, or an msleep(9) error).
 */
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
    int line)

{
	struct thread *td;
	int error;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	error = 0;
	td = curthread;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->lock_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	/* After a panic, grant every request so shutdown can proceed. */
	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	/* LK_NOSHARE locks silently promote shared requests to exclusive. */
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != td) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
			lkp->lk_lockholder, td, lkp->lk_exclusivecount));
		/* Convert every exclusive reference into a shared one. */
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == td)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			/* On failure, hand the lock over to a pending exclusive waiter. */
			if (error) {
			         if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
			                   wakeup((void *)lkp);
			         break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == td) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
			         wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			/* Only the holder (or LK_KERNPROC) may release. */
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    td, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else  {
			/* Releasing an unheld lock: complain but survive. */
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    td);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == td)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/*
	 * Wake up a drainer (see acquiredrain()) if the lock has just
	 * become completely idle.
	 */
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
47024269Speter
/*
 * Wait until the lock is completely idle (no LK_ALL bits set), sleeping
 * on &lkp->lk_flags with LK_WAITDRAIN set so _lockmgr() knows to wake
 * us when the lock drains.  Called with the interlock held.  Returns 0
 * on success, EBUSY for LK_NOWAIT requests that would block, ENOLCK for
 * LK_SLEEPFAIL requests that slept, or the msleep(9) error.
 */
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}
49129653Sdyson
49224269Speter/*
49329653Sdyson * Initialize a lock; required before use.
49429653Sdyson */
49529653Sdysonvoid
49629653Sdysonlockinit(lkp, prio, wmesg, timo, flags)
49729653Sdyson	struct lock *lkp;
49829653Sdyson	int prio;
49991698Seivind	const char *wmesg;
50029653Sdyson	int timo;
50129653Sdyson	int flags;
50229653Sdyson{
503112106Sjhb	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
50466615Sjasone	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
50529653Sdyson
506117660Struckman	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
50767046Sjasone	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
50829653Sdyson	lkp->lk_sharecount = 0;
50929653Sdyson	lkp->lk_waitcount = 0;
51029653Sdyson	lkp->lk_exclusivecount = 0;
51129653Sdyson	lkp->lk_prio = prio;
51229653Sdyson	lkp->lk_timo = timo;
51329653Sdyson	lkp->lk_lockholder = LK_NOPROC;
514107414Smckusick	lkp->lk_newlock = NULL;
515105370Smckusick#ifdef DEBUG_LOCKS
516148669Sjeff	stack_zero(&lkp->lk_stack);
517105370Smckusick#endif
518168070Sjhb	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL,
519168070Sjhb	    LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
52029653Sdyson}
52129653Sdyson
52229653Sdyson/*
52366615Sjasone * Destroy a lock.
52466615Sjasone */
52566615Sjasonevoid
52666615Sjasonelockdestroy(lkp)
52766615Sjasone	struct lock *lkp;
52866615Sjasone{
529169675Sjhb
530112106Sjhb	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
53166615Sjasone	    lkp, lkp->lk_wmesg);
532168070Sjhb	lock_destroy(&lkp->lk_object);
53366615Sjasone}
53466615Sjasone
/*
 * Disown the lockmgr: transfer ownership of the exclusively-held lock
 * from the current thread to LK_KERNPROC, so that a different thread
 * may release it later.
 */
void
lockmgr_disown(struct lock *lkp)
{
	struct thread *td;

	td = curthread;
	KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
	    ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
	KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
	    lkp->lk_lockholder == LK_KERNPROC,
	    ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
	    td));

	/*
	 * Drop the per-thread lock count and switch the owner.  No
	 * interlock is needed: td_locks is only accessed by curthread
	 * and lk_lockholder needs only a single write.  Note also that
	 * the owner may already be LK_KERNPROC, in which case the
	 * decrement is skipped.
	 */
	if (lkp->lk_lockholder == td)
		td->td_locks--;
	lkp->lk_lockholder = LK_KERNPROC;
}
562175166Sattilio
563175166Sattilio/*
56429653Sdyson * Determine the status of a lock.
56529653Sdyson */
56629653Sdysonint
56783366Sjulianlockstatus(lkp, td)
56829653Sdyson	struct lock *lkp;
56983366Sjulian	struct thread *td;
57029653Sdyson{
57129653Sdyson	int lock_type = 0;
572150646Srwatson	int interlocked;
57329653Sdyson
574175635Sattilio	KASSERT(td == NULL || td == curthread,
575175635Sattilio	    ("%s: thread passed argument (%p) is not valid", __func__, td));
576175635Sattilio
577150646Srwatson	if (!kdb_active) {
578150646Srwatson		interlocked = 1;
579150646Srwatson		mtx_lock(lkp->lk_interlock);
580150646Srwatson	} else
581150646Srwatson		interlocked = 0;
58254444Seivind	if (lkp->lk_exclusivecount != 0) {
583110414Sjulian		if (td == NULL || lkp->lk_lockholder == td)
58454444Seivind			lock_type = LK_EXCLUSIVE;
58554444Seivind		else
58654444Seivind			lock_type = LK_EXCLOTHER;
58754444Seivind	} else if (lkp->lk_sharecount != 0)
58829653Sdyson		lock_type = LK_SHARED;
589150646Srwatson	if (interlocked)
590150646Srwatson		mtx_unlock(lkp->lk_interlock);
59129653Sdyson	return (lock_type);
59229653Sdyson}
59329653Sdyson
59429653Sdyson/*
595162941Stegge * Determine the number of waiters on a lock.
596162941Stegge */
597162941Steggeint
598162941Steggelockwaiters(lkp)
599162941Stegge	struct lock *lkp;
600162941Stegge{
601162941Stegge	int count;
602162941Stegge
603162941Stegge	mtx_lock(lkp->lk_interlock);
604162941Stegge	count = lkp->lk_waitcount;
605162941Stegge	mtx_unlock(lkp->lk_interlock);
606162941Stegge	return (count);
607162941Stegge}
608162941Stegge
609162941Stegge/*
61024269Speter * Print out information about state of a lock. Used by VOP_PRINT
61128569Sphk * routines to display status about contained locks.
61224269Speter */
61324271Spetervoid
61424269Speterlockmgr_printinfo(lkp)
61524269Speter	struct lock *lkp;
61624269Speter{
61724269Speter
61824269Speter	if (lkp->lk_sharecount)
61924269Speter		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
62024269Speter		    lkp->lk_sharecount);
62124269Speter	else if (lkp->lk_flags & LK_HAVE_EXCL)
622124163Skan		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
623124163Skan		    lkp->lk_wmesg, lkp->lk_exclusivecount,
624124163Skan		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
62524269Speter	if (lkp->lk_waitcount > 0)
62624269Speter		printf(" with %d pending", lkp->lk_waitcount);
627148669Sjeff#ifdef DEBUG_LOCKS
628174137Srwatson	stack_print_ddb(&lkp->lk_stack);
629148669Sjeff#endif
63024269Speter}
631161322Sjhb
632161322Sjhb#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp, so back up from the wait
	 * channel by the offset of lk_flags to recover the lock pointer.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}
675161337Sjhb
676164246Skmacyvoid
677164246Skmacydb_show_lockmgr(struct lock_object *lock)
678161322Sjhb{
679161322Sjhb	struct thread *td;
680161322Sjhb	struct lock *lkp;
681161322Sjhb
682164246Skmacy	lkp = (struct lock *)lock;
683161322Sjhb
684168070Sjhb	db_printf(" lock type: %s\n", lkp->lk_wmesg);
685168070Sjhb	db_printf(" state: ");
686161322Sjhb	if (lkp->lk_sharecount)
687161322Sjhb		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
688161322Sjhb	else if (lkp->lk_flags & LK_HAVE_EXCL) {
689161322Sjhb		td = lkp->lk_lockholder;
690161322Sjhb		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
691161322Sjhb		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
692173600Sjulian		    td->td_proc->p_pid, td->td_name);
693161322Sjhb	} else
694161322Sjhb		db_printf("UNLOCKED\n");
695161322Sjhb	if (lkp->lk_waitcount > 0)
696168070Sjhb		db_printf(" waiters: %d\n", lkp->lk_waitcount);
697161322Sjhb}
698161322Sjhb#endif
699