/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 144222 2005-03-28 12:52:10Z jeff $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
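
/*
 * Example usage (editorial sketch, not part of the original file; the
 * lock name "mylock" and the PVFS priority are illustrative only):
 *
 *	struct lock mylock;
 *
 *	lockinit(&mylock, PVFS, "mylck", 0, 0);
 *	...
 *	lockmgr(&mylock, LK_SHARED, NULL, curthread);
 *	... access data shared with other threads ...
 *	lockmgr(&mylock, LK_RELEASE, NULL, curthread);
 */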

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define	COUNT(td, x)	do { if ((td)) (td)->td_locks += (x); } while (0)

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
static struct mtx lock_mtx;

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);
static void
lockmgr_init(void *dummy __unused)
{
	mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCKMGR, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct thread *td, struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static LOCK_INLINE void
shareunlock(struct thread *td, struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization: on MP systems, spin briefly with the
 * interlock dropped, sampling lk_flags, in the hope that the lock is
 * released before we commit to a full sleep in acquire().
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int s, error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if ((extflags & LK_INTERLOCK) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
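		/*
		 * The lock may have been handed off to a new lock while we
		 * slept (see transferlockers()); if so, chase lk_newlock,
		 * and let the last migrating waiter wake the transferring
		 * thread sleeping on &lk_newlock.
		 */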
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
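
/*
 * Example (editorial sketch, not in the original source): a shared holder
 * can try LK_EXCLUPGRADE and, on EBUSY, knows its shared lock has already
 * been released, so it may simply reacquire exclusively ("mylock" is a
 * hypothetical lock):
 *
 *	if (lockmgr(&mylock, LK_EXCLUPGRADE, NULL, curthread) == EBUSY)
 *		lockmgr(&mylock, LK_EXCLUSIVE, NULL, curthread);
 */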
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, td)
#else
debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
#ifdef DEBUG_LOCKS
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "td == %p %s:%d", lkp, lkp->lk_wmesg, flags, td, file, line);
#else
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, "
	    "exclusivecount == %d, flags == 0x%x, td == %p", lkp,
	    lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount,
	    flags, td);
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_slockholder = thr;
			lkp->lk_sfilename = file;
			lkp->lk_slineno = line;
			lkp->lk_slockername = name;
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclusivecount (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock is
		 * always released.
		 */
		if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(td, lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags &
				    (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
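
/*
 * Example (editorial sketch, not in the original source): before tearing
 * down an object containing a lockmgr lock, callers typically drain it so
 * no thread is left sleeping on the lock's address ("mylock" is a
 * hypothetical lock):
 *
 *	lockmgr(&mylock, LK_DRAIN, NULL, curthread);
 *	lockmgr(&mylock, LK_RELEASE, NULL, curthread);
 *	lockdestroy(&mylock);
 */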

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	if ((extflags & LK_INTERLOCK) == 0) {
		error = apause(lkp, LK_ALL);
		if (error == 0)
			return 0;
	}

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.  Waiters on
 * "from" are woken, migrate to "to" via lk_newlock in acquire(), and the
 * last one to leave wakes us via &from->lk_newlock.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0,
	    ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	lkp->lk_filename = "none";
	lkp->lk_lockername = "never exclusive locked";
	lkp->lk_lineno = 0;
	lkp->lk_slockholder = LK_NOPROC;
	lkp->lk_sfilename = "none";
	lkp->lk_slockername = "never share locked";
	lkp->lk_slineno = 0;
#endif
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
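
/*
 * Example (editorial sketch, not in the original source): callers commonly
 * use lockstatus() in assertions ("mylock" is a hypothetical lock):
 *
 *	KASSERT(lockstatus(&mylock, curthread) == LK_EXCLUSIVE,
 *	    ("caller does not hold mylock exclusively"));
 */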

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}