/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 164246 2006-11-13 05:41:46Z kmacy $");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif

struct lock_class lock_class_lockmgr = {
	"lockmgr",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_lockmgr
#endif
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

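/*
 * Usage sketch (illustrative, not part of the original file): callers go
 * through the lockmgr() wrapper macro from <sys/lockmgr.h>, which passes
 * file/line to _lockmgr(); curthread and the exact macro shape are
 * assumptions from contemporaneous FreeBSD.
 *
 *	lockmgr(&lk, LK_SHARED, NULL, curthread);
 *	if (lockmgr(&lk, LK_UPGRADE, NULL, curthread) == 0)
 *		lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *	(on upgrade failure the shared hold has already been dropped;
 *	see the LK_UPGRADE case below)
 */
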
#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
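
/*
 * LK_ALL names every flag that indicates activity on a lock; a drain
 * request (see acquiredrain() below) must wait for all of them to clear.
 */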
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int acquiredrain(struct lock *lkp, int extflags);

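/*
 * Grant "incr" shared references to the caller and mark the lock as
 * having a nonzero share count.
 */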
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
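		/*
		 * Last shared hold is gone: wake threads waiting to take
		 * the lock exclusively or to finish an upgrade.
		 */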
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return (EBUSY);
	error = 0;
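	/*
	 * Sleep until none of the wanted flags are set; msleep() drops
	 * the interlock while we sleep and reacquires it before returning.
	 */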
	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
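		/*
		 * transferlockers() may have handed our waiters to a
		 * replacement lock while we slept: chase lk_newlock, and
		 * let the last waiter to move over wake the transferrer.
		 */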
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
_lockmgr(struct lock *lkp, int flags, struct mtx *interlkp,
	 struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	uint64_t waitstart;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	lock_profile_waitstart(&waitstart);
	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, "
	    "exclusivecount == %d, flags == 0x%x, td == %p",
	    lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

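	/*
	 * Per the LK_INTERLOCK contract, the caller's interlock is
	 * released here exactly once, regardless of how the request
	 * below turns out.
	 */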
	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object,
				    waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags &
				    (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object,
		    waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

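/*
 * Like acquire(), but waits for every flag in LK_ALL to clear, i.e. for
 * all activity on the lock to cease.
 */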
static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL))
		return (EBUSY);
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return (error);
		if (extflags & LK_SLEEPFAIL)
			return (ENOLCK);
	}
	return (0);
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
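	/*
	 * Publish the replacement lock and wake the waiters; each waiter
	 * re-targets itself in acquire(), and the last one to move over
	 * wakes us through the old lock's lk_newlock before we clear it.
	 */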
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_profile_object_init(&lkp->lk_object, &lock_class_lockmgr, wmesg);
}
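
/*
 * Hedged example: initialize a lock that sleeps at PVFS priority with no
 * timeout and no extra flags (PVFS is an assumption from the era's
 * <sys/priority.h>, not something defined here), and destroy it when the
 * containing object goes away:
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 *	...
 *	lockdestroy(&lk);
 */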

/*
 * Destroy a lock.
 */
void
lockdestroy(struct lock *lkp)
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_profile_object_destroy(&lkp->lk_object);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
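
/*
 * Hedged usage sketch (curthread is an assumption, not defined here):
 * check that the caller holds the lock exclusively before touching
 * protected state:
 *
 *	if (lockstatus(&lk, curthread) != LK_EXCLUSIVE)
 *		panic("examplk not exclusively held");
 */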

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (lkp->lk_wmesg != td->td_wmesg)
		return (0);

	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf("lock type: %s\n", lkp->lk_wmesg);
	db_printf("state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_proc->p_comm);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf("waiters: %d\n", lkp->lk_waitcount);
}
#endif