/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 176116 2008-02-08 21:45:47Z attilio $");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#define	LOCKMGR_TRYOP(x)	((x) & LK_NOWAIT)
#define	LOCKMGR_TRYW(x)		(LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)

static void	assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
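
/*
 * Illustrative sketch (not part of the original file; the lock name and
 * the PVFS priority are assumptions): the expected lifecycle of a
 * lockmgr lock, using the lockmgr() wrapper macro around _lockmgr():
 *
 *	static struct lock example_lk;
 *
 *	lockinit(&example_lk, PVFS, "examplk", 0, 0);
 *	lockmgr(&example_lk, LK_SHARED, NULL);
 *	lockmgr(&example_lk, LK_RELEASE, NULL);
 *	lockdestroy(&example_lk);
 */
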
89
90void
91assert_lockmgr(struct lock_object *lock, int what)
92{
93
94	panic("lockmgr locks do not support assertions");
95}
96
97void
98lock_lockmgr(struct lock_object *lock, int how)
99{
100
101	panic("lockmgr locks do not support sleep interlocking");
102}
103
104int
105unlock_lockmgr(struct lock_object *lock)
106{
107
108	panic("lockmgr locks do not support sleep interlocking");
109}
110
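/*
 * COUNT() adjusts the per-thread count of held lockmgr locks; LK_ALL
 * names every flag that must be clear before a drain request can be
 * granted.
 */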
#define	COUNT(td, x)	((td)->td_locks += (x))
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted,
    int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested,
    uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return (EBUSY);
	error = 0;
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested,
		    waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
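/*
 * Illustrative sketch (lk is an assumed, already-initialized lock): the
 * request types handled below correspond to calls such as
 *
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);	exclusive acquire
 *	lockmgr(&lk, LK_DOWNGRADE, NULL);	exclusive -> shared
 *	lockmgr(&lk, LK_UPGRADE, NULL);		shared -> exclusive
 *	lockmgr(&lk, LK_RELEASE, NULL);		release either kind
 *
 * where lockmgr() is the wrapper macro that supplies file and line
 * information to _lockmgr().
 */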
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp, char *file,
    int line)
{
	struct thread *td;
	int error;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	error = 0;
	td = curthread;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, "
	    "exclusivecount == %d, flags == 0x%x, td == %p", lkp,
	    lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount,
	    flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->lock_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (!LOCKMGR_TRYOP(extflags))
			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file,
			    line);
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != td) {
			lockflags = LK_HAVE_EXCL;
			if (!(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested,
			    &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(
				    &lkp->lk_object, contested, waitstart,
				    file, line);
			WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags),
			    file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
			lkp->lk_lockholder, td, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the lock
		 * will always have been released.
		 */
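		/*
		 * Illustrative sketch (assumed caller, not from this
		 * file): because a failed upgrade leaves the lock
		 * unlocked, callers typically retry with a fresh
		 * exclusive request:
		 *
		 *	if (lockmgr(&lk, LK_UPGRADE, NULL) != 0)
		 *		lockmgr(&lk, LK_EXCLUSIVE, NULL);
		 */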
		if (lkp->lk_lockholder == td)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO,
			    &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL |
				    LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = td;
			lkp->lk_exclusivecount = 1;
			WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE |
			    LOP_TRYLOCK, file, line);
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (!LOCKMGR_TRYOP(extflags))
			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line);
		if (lkp->lk_lockholder == td) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
				    LOCKMGR_TRYW(extflags), file, line);
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
		    &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested,
		    &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
		    LOCKMGR_TRYW(extflags), file, line);
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested,
		    waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != td &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    td, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC) {
				WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
				    file, line);
				COUNT(td, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
			shareunlock(td, lkp, 1);
		} else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    td);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (!LOCKMGR_TRYOP(extflags))
			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line);
		if (lkp->lk_lockholder == td)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = td;
		lkp->lk_exclusivecount = 1;
		WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
		    LOCKMGR_TRYW(extflags), file, line);
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return (EBUSY);
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return (error);
		if (extflags & LK_SLEEPFAIL) {
			return (ENOLCK);
		}
	}
	return (0);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{
	int iflags;

	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_NOWITNESS | LK_NODUP);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if (!(flags & LK_NODUP))
		iflags |= LO_DUPOK;
	if (!(flags & LK_NOWITNESS))
		iflags |= LO_WITNESS;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags);
}
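
/*
 * Illustrative sketch (assumed values): a caller wanting an
 * exclusive-only lock might initialize it with
 *
 *	lockinit(&lk, PVFS, "mylock", 0, LK_NOSHARE);
 *
 * and, before lockdestroy(), drain it so that no waiters remain:
 *
 *	lockmgr(&lk, LK_DRAIN, NULL);
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */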

/*
 * Destroy a lock.
 */
void
lockdestroy(struct lock *lkp)
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_destroy(&lkp->lk_object);
}

/*
 * Disown the lockmgr: pass ownership of an exclusively held lock to
 * LK_KERNPROC.
 */
void
_lockmgr_disown(struct lock *lkp, const char *file, int line)
{
	struct thread *td;

	td = curthread;
	KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
	    ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
	KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
	    lkp->lk_lockholder == LK_KERNPROC,
	    ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
	    td));

	/*
	 * Drop the lock reference and switch the owner.  This is
	 * effectively atomic: td_locks is only accessed by curthread and
	 * lk_lockholder needs only a single write.  Note also that the
	 * lock owner may already be LK_KERNPROC, in which case the
	 * decrement is skipped.
	 */
	if (lkp->lk_lockholder == td) {
		WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line);
		td->td_locks--;
	}
	lkp->lk_lockholder = LK_KERNPROC;
}
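
/*
 * Illustrative sketch (assumed caller): a thread that hands work off
 * may disown the lock so that another context can release it later:
 *
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	_lockmgr_disown(&lk, __FILE__, __LINE__);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);		(from any thread)
 */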

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;
	int interlocked;

	KASSERT(td == curthread,
	    ("%s: thread passed argument (%p) is not valid", __func__, td));

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

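/*
 * Illustrative sketch (assumed caller): verifying exclusive ownership
 * before touching protected state:
 *
 *	KASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE,
 *	    ("example: lock not exclusively held"));
 */
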
/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print_ddb(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf(" lock type: %s\n", lkp->lk_wmesg);
	db_printf(" state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_name);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf(" waiters: %d\n", lkp->lk_waitcount);
}
#endif
