kern_lock.c revision 24269
124269Speter/*
224269Speter * Copyright (c) 1995
324269Speter *	The Regents of the University of California.  All rights reserved.
424269Speter *
524269Speter * This code contains ideas from software contributed to Berkeley by
624269Speter * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
724269Speter * System project at Carnegie-Mellon University.
824269Speter *
924269Speter * Redistribution and use in source and binary forms, with or without
1024269Speter * modification, are permitted provided that the following conditions
1124269Speter * are met:
1224269Speter * 1. Redistributions of source code must retain the above copyright
1324269Speter *    notice, this list of conditions and the following disclaimer.
1424269Speter * 2. Redistributions in binary form must reproduce the above copyright
1524269Speter *    notice, this list of conditions and the following disclaimer in the
1624269Speter *    documentation and/or other materials provided with the distribution.
1724269Speter * 3. All advertising materials mentioning features or use of this software
1824269Speter *    must display the following acknowledgement:
1924269Speter *	This product includes software developed by the University of
2024269Speter *	California, Berkeley and its contributors.
2124269Speter * 4. Neither the name of the University nor the names of its contributors
2224269Speter *    may be used to endorse or promote products derived from this software
2324269Speter *    without specific prior written permission.
2424269Speter *
2524269Speter * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2624269Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2724269Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2824269Speter * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
2924269Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3024269Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3124269Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3224269Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3324269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3424269Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3524269Speter * SUCH DAMAGE.
3624269Speter *
3724269Speter *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
3824269Speter */
3924269Speter
4024269Speter#include <sys/param.h>
4124269Speter#include <sys/proc.h>
4224269Speter#include <sys/lock.h>
4324269Speter#include <machine/cpu.h>
4424269Speter
4524269Speter/*
4624269Speter * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
4824269Speter */
4924269Speter
#ifdef DEBUG
/*
 * Track the number of lockmgr locks held per process for debugging.
 * Wrapped in do/while (0) so the macro expands to a single statement:
 * the bare "if (p)" form would swallow a caller's else branch if the
 * macro were ever used as the body of an if statement.
 */
#define COUNT(p, x) do { if (p) (p)->p_locks += (x); } while (0)
#else
#define COUNT(p, x) do { } while (0)
#endif
5524269Speter
#if NCPUS > 1

/*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 *
 * NOTE: PAUSE is not statement-safe and must only be expanded directly
 * inside a loop or switch body: the final "break" (and the one after a
 * successful spin) deliberately exits the CALLER's enclosing construct
 * when the wanted condition has cleared.  The interlock is dropped
 * while spinning so another CPU can change the lock state.
 */
int lock_wait_time = 100;	/* spin iterations before sleeping; 0 disables spinning */
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* NCPUS == 1 */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* NCPUS == 1 */
8724269Speter
8824269Speter/*
8924269Speter * Acquire a resource.
9024269Speter */
/*
 * Acquire a resource: sleep until the "wanted" condition clears, or an
 * error occurs.  Must be expanded inside the caller's switch/loop with
 * the interlock held and "error" already zero.
 *
 * The leading PAUSE may (on MP) break out of the CALLER's enclosing
 * switch case directly when the condition clears during the spin; in
 * that case error keeps its prior value (0 in lockmgr).  Otherwise the
 * sleep loop re-evaluates "wanted" after every wakeup.  The interlock
 * is released across tsleep() and reacquired afterwards.  LK_SLEEPFAIL
 * makes any sleep — even a successful one — fail with ENOLCK.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}
10724269Speter
10824269Speter/*
10924269Speter * Initialize a lock; required before use.
11024269Speter */
11124269Spetervoid
11224269Speterlockinit(lkp, prio, wmesg, timo, flags)
11324269Speter	struct lock *lkp;
11424269Speter	int prio;
11524269Speter	char *wmesg;
11624269Speter	int timo;
11724269Speter	int flags;
11824269Speter{
11924269Speter
12024269Speter	bzero(lkp, sizeof(struct lock));
12124269Speter	simple_lock_init(&lkp->lk_interlock);
12224269Speter	lkp->lk_flags = flags & LK_EXTFLG_MASK;
12324269Speter	lkp->lk_prio = prio;
12424269Speter	lkp->lk_timo = timo;
12524269Speter	lkp->lk_wmesg = wmesg;
12624269Speter	lkp->lk_lockholder = LK_NOPROC;
12724269Speter}
12824269Speter
12924269Speter/*
13024269Speter * Determine the status of a lock.
13124269Speter */
13224269Speterint
13324269Speterlockstatus(lkp)
13424269Speter	struct lock *lkp;
13524269Speter{
13624269Speter	int lock_type = 0;
13724269Speter
13824269Speter	simple_lock(&lkp->lk_interlock);
13924269Speter	if (lkp->lk_exclusivecount != 0)
14024269Speter		lock_type = LK_EXCLUSIVE;
14124269Speter	else if (lkp->lk_sharecount != 0)
14224269Speter		lock_type = LK_SHARED;
14324269Speter	simple_unlock(&lkp->lk_interlock);
14424269Speter	return (lock_type);
14524269Speter}
14624269Speter
14724269Speter/*
14824269Speter * Set, change, or release a lock.
14924269Speter *
15024269Speter * Shared requests increment the shared count. Exclusive requests set the
15124269Speter * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
15224269Speter * accepted shared locks and shared-to-exclusive upgrades to go away.
15324269Speter */
15424269Speterint
15524269Speterlockmgr(lkp, flags, interlkp, p)
15624269Speter	__volatile struct lock *lkp;
15724269Speter	u_int flags;
15824269Speter	struct simplelock *interlkp;
15924269Speter	struct proc *p;
16024269Speter{
16124269Speter	int error;
16224269Speter	pid_t pid;
16324269Speter	int extflags;
16424269Speter
16524269Speter	error = 0;
16624269Speter	if (p)
16724269Speter		pid = p->p_pid;
16824269Speter	else
16924269Speter		pid = LK_KERNPROC;
17024269Speter	simple_lock(&lkp->lk_interlock);
17124269Speter	if (flags & LK_INTERLOCK)
17224269Speter		simple_unlock(interlkp);
17324269Speter	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
17424269Speter#ifdef DIAGNOSTIC
17524269Speter	/*
17624269Speter	 * Once a lock has drained, the LK_DRAINING flag is set and an
17724269Speter	 * exclusive lock is returned. The only valid operation thereafter
17824269Speter	 * is a single release of that exclusive lock. This final release
17924269Speter	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
18024269Speter	 * further requests of any sort will result in a panic. The bits
18124269Speter	 * selected for these two flags are chosen so that they will be set
18224269Speter	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
18324269Speter	 * The final release is permitted to give a new lease on life to
18424269Speter	 * the lock by specifying LK_REENABLE.
18524269Speter	 */
18624269Speter	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
18724269Speter		if (lkp->lk_flags & LK_DRAINED)
18824269Speter			panic("lockmgr: using decommissioned lock");
18924269Speter		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
19024269Speter		    lkp->lk_lockholder != pid)
19124269Speter			panic("lockmgr: non-release on draining lock: %d\n",
19224269Speter			    flags & LK_TYPE_MASK);
19324269Speter		lkp->lk_flags &= ~LK_DRAINING;
19424269Speter		if ((flags & LK_REENABLE) == 0)
19524269Speter			lkp->lk_flags |= LK_DRAINED;
19624269Speter	}
19724269Speter#endif DIAGNOSTIC
19824269Speter
19924269Speter	switch (flags & LK_TYPE_MASK) {
20024269Speter
20124269Speter	case LK_SHARED:
20224269Speter		if (lkp->lk_lockholder != pid) {
20324269Speter			/*
20424269Speter			 * If just polling, check to see if we will block.
20524269Speter			 */
20624269Speter			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
20724269Speter			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
20824269Speter				error = EBUSY;
20924269Speter				break;
21024269Speter			}
21124269Speter			/*
21224269Speter			 * Wait for exclusive locks and upgrades to clear.
21324269Speter			 */
21424269Speter			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
21524269Speter			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
21624269Speter			if (error)
21724269Speter				break;
21824269Speter			lkp->lk_sharecount++;
21924269Speter			COUNT(p, 1);
22024269Speter			break;
22124269Speter		}
22224269Speter		/*
22324269Speter		 * We hold an exclusive lock, so downgrade it to shared.
22424269Speter		 * An alternative would be to fail with EDEADLK.
22524269Speter		 */
22624269Speter		lkp->lk_sharecount++;
22724269Speter		COUNT(p, 1);
22824269Speter		/* fall into downgrade */
22924269Speter
23024269Speter	case LK_DOWNGRADE:
23124269Speter		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
23224269Speter			panic("lockmgr: not holding exclusive lock");
23324269Speter		lkp->lk_sharecount += lkp->lk_exclusivecount;
23424269Speter		lkp->lk_exclusivecount = 0;
23524269Speter		lkp->lk_flags &= ~LK_HAVE_EXCL;
23624269Speter		lkp->lk_lockholder = LK_NOPROC;
23724269Speter		if (lkp->lk_waitcount)
23824269Speter			wakeup((void *)lkp);
23924269Speter		break;
24024269Speter
24124269Speter	case LK_EXCLUPGRADE:
24224269Speter		/*
24324269Speter		 * If another process is ahead of us to get an upgrade,
24424269Speter		 * then we want to fail rather than have an intervening
24524269Speter		 * exclusive access.
24624269Speter		 */
24724269Speter		if (lkp->lk_flags & LK_WANT_UPGRADE) {
24824269Speter			lkp->lk_sharecount--;
24924269Speter			COUNT(p, -1);
25024269Speter			error = EBUSY;
25124269Speter			break;
25224269Speter		}
25324269Speter		/* fall into normal upgrade */
25424269Speter
25524269Speter	case LK_UPGRADE:
25624269Speter		/*
25724269Speter		 * Upgrade a shared lock to an exclusive one. If another
25824269Speter		 * shared lock has already requested an upgrade to an
25924269Speter		 * exclusive lock, our shared lock is released and an
26024269Speter		 * exclusive lock is requested (which will be granted
26124269Speter		 * after the upgrade). If we return an error, the file
26224269Speter		 * will always be unlocked.
26324269Speter		 */
26424269Speter		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
26524269Speter			panic("lockmgr: upgrade exclusive lock");
26624269Speter		lkp->lk_sharecount--;
26724269Speter		COUNT(p, -1);
26824269Speter		/*
26924269Speter		 * If we are just polling, check to see if we will block.
27024269Speter		 */
27124269Speter		if ((extflags & LK_NOWAIT) &&
27224269Speter		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
27324269Speter		     lkp->lk_sharecount > 1)) {
27424269Speter			error = EBUSY;
27524269Speter			break;
27624269Speter		}
27724269Speter		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
27824269Speter			/*
27924269Speter			 * We are first shared lock to request an upgrade, so
28024269Speter			 * request upgrade and wait for the shared count to
28124269Speter			 * drop to zero, then take exclusive lock.
28224269Speter			 */
28324269Speter			lkp->lk_flags |= LK_WANT_UPGRADE;
28424269Speter			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
28524269Speter			lkp->lk_flags &= ~LK_WANT_UPGRADE;
28624269Speter			if (error)
28724269Speter				break;
28824269Speter			lkp->lk_flags |= LK_HAVE_EXCL;
28924269Speter			lkp->lk_lockholder = pid;
29024269Speter			if (lkp->lk_exclusivecount != 0)
29124269Speter				panic("lockmgr: non-zero exclusive count");
29224269Speter			lkp->lk_exclusivecount = 1;
29324269Speter			COUNT(p, 1);
29424269Speter			break;
29524269Speter		}
29624269Speter		/*
29724269Speter		 * Someone else has requested upgrade. Release our shared
29824269Speter		 * lock, awaken upgrade requestor if we are the last shared
29924269Speter		 * lock, then request an exclusive lock.
30024269Speter		 */
30124269Speter		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
30224269Speter			wakeup((void *)lkp);
30324269Speter		/* fall into exclusive request */
30424269Speter
30524269Speter	case LK_EXCLUSIVE:
30624269Speter		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
30724269Speter			/*
30824269Speter			 *	Recursive lock.
30924269Speter			 */
31024269Speter			if ((extflags & LK_CANRECURSE) == 0)
31124269Speter				panic("lockmgr: locking against myself");
31224269Speter			lkp->lk_exclusivecount++;
31324269Speter			COUNT(p, 1);
31424269Speter			break;
31524269Speter		}
31624269Speter		/*
31724269Speter		 * If we are just polling, check to see if we will sleep.
31824269Speter		 */
31924269Speter		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
32024269Speter		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
32124269Speter		     lkp->lk_sharecount != 0)) {
32224269Speter			error = EBUSY;
32324269Speter			break;
32424269Speter		}
32524269Speter		/*
32624269Speter		 * Try to acquire the want_exclusive flag.
32724269Speter		 */
32824269Speter		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
32924269Speter		    (LK_HAVE_EXCL | LK_WANT_EXCL));
33024269Speter		if (error)
33124269Speter			break;
33224269Speter		lkp->lk_flags |= LK_WANT_EXCL;
33324269Speter		/*
33424269Speter		 * Wait for shared locks and upgrades to finish.
33524269Speter		 */
33624269Speter		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
33724269Speter		       (lkp->lk_flags & LK_WANT_UPGRADE));
33824269Speter		lkp->lk_flags &= ~LK_WANT_EXCL;
33924269Speter		if (error)
34024269Speter			break;
34124269Speter		lkp->lk_flags |= LK_HAVE_EXCL;
34224269Speter		lkp->lk_lockholder = pid;
34324269Speter		if (lkp->lk_exclusivecount != 0)
34424269Speter			panic("lockmgr: non-zero exclusive count");
34524269Speter		lkp->lk_exclusivecount = 1;
34624269Speter		COUNT(p, 1);
34724269Speter		break;
34824269Speter
34924269Speter	case LK_RELEASE:
35024269Speter		if (lkp->lk_exclusivecount != 0) {
35124269Speter			if (pid != lkp->lk_lockholder)
35224269Speter				panic("lockmgr: pid %d, not %s %d unlocking",
35324269Speter				    pid, "exclusive lock holder",
35424269Speter				    lkp->lk_lockholder);
35524269Speter			lkp->lk_exclusivecount--;
35624269Speter			COUNT(p, -1);
35724269Speter			if (lkp->lk_exclusivecount == 0) {
35824269Speter				lkp->lk_flags &= ~LK_HAVE_EXCL;
35924269Speter				lkp->lk_lockholder = LK_NOPROC;
36024269Speter			}
36124269Speter		} else if (lkp->lk_sharecount != 0) {
36224269Speter			lkp->lk_sharecount--;
36324269Speter			COUNT(p, -1);
36424269Speter		}
36524269Speter		if (lkp->lk_waitcount)
36624269Speter			wakeup((void *)lkp);
36724269Speter		break;
36824269Speter
36924269Speter	case LK_DRAIN:
37024269Speter		/*
37124269Speter		 * Check that we do not already hold the lock, as it can
37224269Speter		 * never drain if we do. Unfortunately, we have no way to
37324269Speter		 * check for holding a shared lock, but at least we can
37424269Speter		 * check for an exclusive one.
37524269Speter		 */
37624269Speter		if (lkp->lk_lockholder == pid)
37724269Speter			panic("lockmgr: draining against myself");
37824269Speter		/*
37924269Speter		 * If we are just polling, check to see if we will sleep.
38024269Speter		 */
38124269Speter		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
38224269Speter		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
38324269Speter		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
38424269Speter			error = EBUSY;
38524269Speter			break;
38624269Speter		}
38724269Speter		PAUSE(lkp, ((lkp->lk_flags &
38824269Speter		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
38924269Speter		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
39024269Speter		for (error = 0; ((lkp->lk_flags &
39124269Speter		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
39224269Speter		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
39324269Speter			lkp->lk_flags |= LK_WAITDRAIN;
39424269Speter			simple_unlock(&lkp->lk_interlock);
39524269Speter			if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
39624269Speter			    lkp->lk_wmesg, lkp->lk_timo))
39724269Speter				return (error);
39824269Speter			if ((extflags) & LK_SLEEPFAIL)
39924269Speter				return (ENOLCK);
40024269Speter			simple_lock(&lkp->lk_interlock);
40124269Speter		}
40224269Speter		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
40324269Speter		lkp->lk_lockholder = pid;
40424269Speter		lkp->lk_exclusivecount = 1;
40524269Speter		COUNT(p, 1);
40624269Speter		break;
40724269Speter
40824269Speter	default:
40924269Speter		simple_unlock(&lkp->lk_interlock);
41024269Speter		panic("lockmgr: unknown locktype request %d",
41124269Speter		    flags & LK_TYPE_MASK);
41224269Speter		/* NOTREACHED */
41324269Speter	}
41424269Speter	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
41524269Speter	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
41624269Speter	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
41724269Speter		lkp->lk_flags &= ~LK_WAITDRAIN;
41824269Speter		wakeup((void *)&lkp->lk_flags);
41924269Speter	}
42024269Speter	simple_unlock(&lkp->lk_interlock);
42124269Speter	return (error);
42224269Speter}
42324269Speter
42424269Speter/*
42524269Speter * Print out information about state of a lock. Used by VOP_PRINT
42624269Speter * routines to display ststus about contained locks.
42724269Speter */
42824269Speterlockmgr_printinfo(lkp)
42924269Speter	struct lock *lkp;
43024269Speter{
43124269Speter
43224269Speter	if (lkp->lk_sharecount)
43324269Speter		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
43424269Speter		    lkp->lk_sharecount);
43524269Speter	else if (lkp->lk_flags & LK_HAVE_EXCL)
43624269Speter		printf(" lock type %s: EXCL (count %d) by pid %d",
43724269Speter		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
43824269Speter	if (lkp->lk_waitcount > 0)
43924269Speter		printf(" with %d pending", lkp->lk_waitcount);
44024269Speter}
44124269Speter
44224269Speter#if defined(DEBUG) && NCPUS == 1
44324269Speter#include <sys/kernel.h>
44424269Speter#include <vm/vm.h>
44524269Speter#include <sys/sysctl.h>
/*
 * Debug policy when a simple lock operation finds an unexpected state
 * (used by _simple_lock/_simple_unlock below):
 *  -1  panic; 1  print a backtrace; >1  pause that many seconds.
 */
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };	/* sysctl debug knob */
int simplelockrecurse;		/* nonzero: debug simple locks become no-ops */
44924269Speter/*
45024269Speter * Simple lock functions so that the debugger can see from whence
45124269Speter * they are being called.
45224269Speter */
45324269Spetervoid
45424269Spetersimple_lock_init(alp)
45524269Speter	struct simplelock *alp;
45624269Speter{
45724269Speter
45824269Speter	alp->lock_data = 0;
45924269Speter}
46024269Speter
46124269Spetervoid
46224269Speter_simple_lock(alp, id, l)
46324269Speter	__volatile struct simplelock *alp;
46424269Speter	const char *id;
46524269Speter	int l;
46624269Speter{
46724269Speter
46824269Speter	if (simplelockrecurse)
46924269Speter		return;
47024269Speter	if (alp->lock_data == 1) {
47124269Speter		if (lockpausetime == -1)
47224269Speter			panic("%s:%d: simple_lock: lock held", id, l);
47324269Speter		printf("%s:%d: simple_lock: lock held\n", id, l);
47424269Speter		if (lockpausetime == 1) {
47524269Speter			BACKTRACE(curproc);
47624269Speter		} else if (lockpausetime > 1) {
47724269Speter			printf("%s:%d: simple_lock: lock held...", id, l);
47824269Speter			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
47924269Speter			    lockpausetime * hz);
48024269Speter			printf(" continuing\n");
48124269Speter		}
48224269Speter	}
48324269Speter	alp->lock_data = 1;
48424269Speter	if (curproc)
48524269Speter		curproc->p_simple_locks++;
48624269Speter}
48724269Speter
48824269Speterint
48924269Speter_simple_lock_try(alp, id, l)
49024269Speter	__volatile struct simplelock *alp;
49124269Speter	const char *id;
49224269Speter	int l;
49324269Speter{
49424269Speter
49524269Speter	if (alp->lock_data)
49624269Speter		return (0);
49724269Speter	if (simplelockrecurse)
49824269Speter		return (1);
49924269Speter	alp->lock_data = 1;
50024269Speter	if (curproc)
50124269Speter		curproc->p_simple_locks++;
50224269Speter	return (1);
50324269Speter}
50424269Speter
50524269Spetervoid
50624269Speter_simple_unlock(alp, id, l)
50724269Speter	__volatile struct simplelock *alp;
50824269Speter	const char *id;
50924269Speter	int l;
51024269Speter{
51124269Speter
51224269Speter	if (simplelockrecurse)
51324269Speter		return;
51424269Speter	if (alp->lock_data == 0) {
51524269Speter		if (lockpausetime == -1)
51624269Speter			panic("%s:%d: simple_unlock: lock not held", id, l);
51724269Speter		printf("%s:%d: simple_unlock: lock not held\n", id, l);
51824269Speter		if (lockpausetime == 1) {
51924269Speter			BACKTRACE(curproc);
52024269Speter		} else if (lockpausetime > 1) {
52124269Speter			printf("%s:%d: simple_unlock: lock not held...", id, l);
52224269Speter			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
52324269Speter			    lockpausetime * hz);
52424269Speter			printf(" continuing\n");
52524269Speter		}
52624269Speter	}
52724269Speter	alp->lock_data = 0;
52824269Speter	if (curproc)
52924269Speter		curproc->p_simple_locks--;
53024269Speter}
53124269Speter#endif /* DEBUG && NCPUS == 1 */
532