/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 167782 2007-03-21 19:28:20Z jhb $");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

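/*
 * COUNT() maintains the per-thread count of lockmgr locks held (when a
 * thread pointer is available); LK_ALL is the set of flags that keep a
 * lock from draining.
 */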
#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags);

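/*
 * Adjust the shared-holder count, keeping LK_SHARE_NONZERO in sync and
 * waking exclusive and upgrade waiters when the last shared holder leaves.
 */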
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

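/*
 * Sleep until none of the flags in "wanted" are set on the lock.  The
 * interlock is held on entry and on return.  If the lock is being handed
 * off to a replacement lock (lk_newlock), follow the handoff and update
 * the caller's lock pointer.
 */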
static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
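/*
 * A typical call sequence, as a sketch only (callers normally go through
 * the lockmgr() macro, which supplies the file/line arguments, and often
 * pass a NULL interlock):
 *
 *	lockmgr(lkp, LK_EXCLUSIVE, NULL, curthread);
 *	... manipulate the protected object ...
 *	lockmgr(lkp, LK_RELEASE, NULL, curthread);
 */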
int
_lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
	 struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclusivecount (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

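/*
 * Used by LK_DRAIN: wait until the lock is completely idle (no holders,
 * no pending exclusive or upgrade requests, and no other waiters) so the
 * caller can take it exclusively.
 */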
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags&LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
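	/*
	 * Wake the sleepers so they notice lk_newlock in acquire() and move
	 * to the new lock; the last waiter to migrate wakes us up on
	 * &lk_newlock below.
	 */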
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}


/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_profile_object_init(&lkp->lk_object, &lock_class_lockmgr, wmesg);
}
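/*
 * Example setup, as a sketch only (the priority, wait message, and flags
 * shown here are illustrative, not prescriptive):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	...
 *	lockdestroy(&lk);
 */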

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_profile_object_destroy(&lkp->lk_object);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf("lock type: %s\n", lkp->lk_wmesg);
	db_printf("state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_proc->p_comm);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf("waiters: %d\n", lkp->lk_waitcount);
}
#endif
