/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $Id: kern_lock.c,v 1.17 1998/02/11 00:05:26 eivind Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
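
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a hypothetical caller embeds a struct lock in its own data structure,
 * initializes it once with lockinit(), and brackets read-side accesses
 * with a shared lock.  The structure, field names, wmesg string and the
 * PVFS priority below are assumptions for the example; error checking
 * is omitted for brevity.
 *
 *	struct mydata {
 *		struct lock	md_lock;
 *		int		md_value;
 *	};
 *
 *	void
 *	mydata_init(struct mydata *md)
 *	{
 *		lockinit(&md->md_lock, PVFS, "mydata", 0, 0);
 *	}
 *
 *	int
 *	mydata_read(struct mydata *md, struct proc *p)
 *	{
 *		int v;
 *
 *		lockmgr(&md->md_lock, LK_SHARED, NULL, p);
 *		v = md->md_value;
 *		lockmgr(&md->md_lock, LK_RELEASE, NULL, p);
 *		return (v);
 *	}
 */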

#ifdef SIMPLELOCK_DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {
#if defined(DIAGNOSTIC)
	if (lkp->lk_sharecount < decr)
#if defined(DDB)
		Debugger("shareunlock: count < decr");
#else
		panic("shareunlock: count < decr");
#endif
#endif

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization: spin for a while, rechecking the
 * flags, before going to sleep.  Note that for this to work, simple_lock
 * and simple_unlock must be real subroutines; the function calls keep
 * the compiler from optimizing away the rereads of lk_flags in the loop.
 */
static int
apause(struct lock *lkp, int flags) {
	int lock_wait;
	lock_wait = LOCK_WAIT_TIME;
	for (; lock_wait > 0; lock_wait--) {
		int i;
		if ((lkp->lk_flags & flags) == 0)
			return 0;
		simple_unlock(&lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--) {
			if ((lkp->lk_flags & flags) == 0) {
				simple_lock(&lkp->lk_interlock);
				if ((lkp->lk_flags & flags) == 0)
					return 0;
				break;
			}
		}
	}
	return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted) {
	int s, error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
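/*
 * Illustrative sketch (hypothetical caller, not part of this file) of an
 * exclusive request combined with LK_INTERLOCK.  The caller holds a simple
 * lock protecting its own state and passes it to lockmgr(), which releases
 * it after taking the lock's interlock, so there is no window between
 * examining the protected state and queueing for the lock:
 *
 *	simple_lock(&md->md_slock);
 *	... examine state protected by md_slock ...
 *	error = lockmgr(&md->md_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &md->md_slock, p);
 *	if (error == 0) {
 *		... md_lock is held exclusively here ...
 *		lockmgr(&md->md_lock, LK_RELEASE, NULL, p);
 *	}
 */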
int
lockmgr(lkp, flags, interlkp, p)
	struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			error = acquire(lkp, extflags,
				LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
#if !defined(MAX_PERF)
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
#endif
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
#if !defined(MAX_PERF)
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
#endif
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
#if !defined(MAX_PERF)
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
#endif
			lkp->lk_exclusivecount = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
#if !defined(MAX_PERF)
			if ((extflags & LK_CANRECURSE) == 0)
				panic("lockmgr: locking against myself");
#endif
			lkp->lk_exclusivecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
#if !defined(MAX_PERF)
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
#endif
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
#if !defined(MAX_PERF)
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
#endif
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
#if !defined(MAX_PERF)
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
#endif

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		COUNT(p, 1);
		break;

	default:
#if !defined(MAX_PERF)
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
#endif
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
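
/*
 * A rough sketch of the upgrade/downgrade requests handled above
 * (hypothetical caller, error handling abbreviated).  Note that a failed
 * LK_UPGRADE leaves the lock entirely unlocked, as described in the
 * LK_UPGRADE case, so the caller must not assume it still holds a shared
 * lock after an error:
 *
 *	lockmgr(&md->md_lock, LK_SHARED, NULL, p);
 *	...
 *	if (lockmgr(&md->md_lock, LK_UPGRADE, NULL, p) != 0) {
 *		... the lock was lost; reacquire or bail out ...
 *	} else {
 *		... exclusive access ...
 *		lockmgr(&md->md_lock, LK_DOWNGRADE, NULL, p);
 *		... shared access again ...
 *		lockmgr(&md->md_lock, LK_RELEASE, NULL, p);
 *	}
 *
 * LK_DRAIN is typically issued before the object containing the lock is
 * torn down: it waits for all shared and exclusive activity to cease and
 * leaves the caller holding the lock exclusively.
 */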

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(&lkp->lk_flags, lkp->lk_prio,
			lkp->lk_wmesg, lkp->lk_timo);
		simple_lock(&lkp->lk_interlock);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(SIMPLELOCK_DEBUG) && (NCPUS == 1 || defined(COMPILING_LINT))
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
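/*
 * A hedged illustration of what these wrappers catch (uniprocessor
 * SIMPLELOCK_DEBUG kernels only; the simplelock name below is made up).
 * Since _simple_lock()/_simple_unlock() record the caller's file and line
 * and track lock_data plus the per-process p_simple_locks count, a
 * mismatched sequence such as:
 *
 *	struct simplelock slock;
 *
 *	simple_lock_init(&slock);
 *	simple_unlock(&slock);		(reports "simple_unlock: lock not held")
 *	simple_lock(&slock);
 *	simple_lock(&slock);		(reports "simple_lock: lock held")
 *
 * is printed with the offending file and line, and can optionally pause,
 * drop into the debugger, or panic depending on the debug.lockpausetime
 * sysctl.
 */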
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_lock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_unlock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#elif defined(SIMPLELOCK_DEBUG)
#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
#endif /* SIMPLELOCK_DEBUG && (NCPUS == 1 || COMPILING_LINT) */