kern_lock.c revision 178150
1139804Simp/*-
2177957Sattilio * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3177957Sattilio * All rights reserved.
424269Speter *
524269Speter * Redistribution and use in source and binary forms, with or without
624269Speter * modification, are permitted provided that the following conditions
724269Speter * are met:
824269Speter * 1. Redistributions of source code must retain the above copyright
9177957Sattilio *    notice(s), this list of conditions and the following disclaimer as
10177957Sattilio *    the first lines of this file unmodified other than the possible
11177957Sattilio *    addition of one or more copyright notices.
1224269Speter * 2. Redistributions in binary form must reproduce the above copyright
13177957Sattilio *    notice(s), this list of conditions and the following disclaimer in the
1424269Speter *    documentation and/or other materials provided with the distribution.
1524269Speter *
16177957Sattilio * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17177957Sattilio * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18177957Sattilio * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19177957Sattilio * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20177957Sattilio * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21177957Sattilio * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22177957Sattilio * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23177957Sattilio * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2424269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25177957Sattilio * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26177957Sattilio * DAMAGE.
2724269Speter */
2824269Speter
29177957Sattilio#include "opt_ddb.h"
30177957Sattilio
31116182Sobrien#include <sys/cdefs.h>
32116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 178150 2008-04-12 13:56:17Z attilio $");
33116182Sobrien
3424269Speter#include <sys/param.h>
3584812Sjhb#include <sys/ktr.h>
3624269Speter#include <sys/lock.h>
37177957Sattilio#include <sys/lock_profile.h>
38102477Sbde#include <sys/lockmgr.h>
3967353Sjhb#include <sys/mutex.h>
40102477Sbde#include <sys/proc.h>
41177957Sattilio#include <sys/sleepqueue.h>
42148668Sjeff#ifdef DEBUG_LOCKS
43148668Sjeff#include <sys/stack.h>
44148668Sjeff#endif
45177957Sattilio#include <sys/systm.h>
4624269Speter
47177957Sattilio#include <machine/cpu.h>
48176014Sattilio
49161322Sjhb#ifdef DDB
50161322Sjhb#include <ddb/ddb.h>
51161322Sjhb#endif
52161322Sjhb
53177957SattilioCTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
54177957Sattilio    (LK_CANRECURSE | LK_NOSHARE));
55177957Sattilio
56177957Sattilio#define	SQ_EXCLUSIVE_QUEUE	0
57177957Sattilio#define	SQ_SHARED_QUEUE		1
58177957Sattilio
59177957Sattilio#ifndef INVARIANTS
60177957Sattilio#define	_lockmgr_assert(lk, what, file, line)
61177957Sattilio#define	TD_LOCKS_INC(td)
62177957Sattilio#define	TD_LOCKS_DEC(td)
63177957Sattilio#else
64177957Sattilio#define	TD_LOCKS_INC(td)	((td)->td_locks++)
65177957Sattilio#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
66177957Sattilio#endif
67177957Sattilio#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
68177957Sattilio#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
69177957Sattilio
70177957Sattilio#ifndef DEBUG_LOCKS
71177957Sattilio#define	STACK_PRINT(lk)
72177957Sattilio#define	STACK_SAVE(lk)
73177957Sattilio#define	STACK_ZERO(lk)
74177957Sattilio#else
75177957Sattilio#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
76177957Sattilio#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
77177957Sattilio#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
78177957Sattilio#endif
79177957Sattilio
80177957Sattilio#define	LOCK_LOG2(lk, string, arg1, arg2)				\
81177957Sattilio	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
82177957Sattilio		CTR2(KTR_LOCK, (string), (arg1), (arg2))
83177957Sattilio#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
84177957Sattilio	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
85177957Sattilio		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
86177957Sattilio
87177957Sattilio#define	LK_TRYOP(x)							\
88177957Sattilio	((x) & LK_NOWAIT)
89177957Sattilio#define	LK_CAN_SHARE(x)							\
90177957Sattilio	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
91177982Sattilio	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
92177957Sattilio
93177957Sattilio#define	lockmgr_disowned(lk)						\
94177957Sattilio	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
95177957Sattilio
96177957Sattilio#define	lockmgr_xlocked(lk)						\
97177957Sattilio	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
98177957Sattilio
99177957Sattiliostatic void	 assert_lockmgr(struct lock_object *lock, int how);
100177957Sattilio#ifdef DDB
101177957Sattiliostatic void	 db_show_lockmgr(struct lock_object *lock);
102177957Sattilio#endif
103177957Sattiliostatic void	 lock_lockmgr(struct lock_object *lock, int how);
104177957Sattiliostatic int	 unlock_lockmgr(struct lock_object *lock);
105177957Sattilio
/*
 * Lock class descriptor for lockmgr locks: hooks consumed by the generic
 * lock framework (witness, DDB show, sleep interlocking).
 */
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr
};
116164246Skmacy
117177957Sattiliostatic __inline struct thread *
118177957Sattiliolockmgr_xholder(struct lock *lk)
119177957Sattilio{
120177957Sattilio	uintptr_t x;
121176249Sattilio
122177957Sattilio	x = lk->lk_lock;
123177957Sattilio	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
124177957Sattilio}
125177957Sattilio
/*
 * Put the current thread to sleep on the lock's sleepqueue.
 *
 * It assumes the sleepqueue chain lock is held on entry and returns with it
 * released.  It also assumes the generic interlock, if any, is sane and was
 * previously checked.  If LK_INTERLOCK is specified in flags, the interlock
 * is dropped before sleeping and is NOT reacquired after the sleep.
 *
 * Returns 0 on a successful sleep, or an errno value when the sleep was
 * interrupted, timed out, or LK_SLEEPFAIL was requested (ENOLCK).
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	struct lock_class *class;
	int catch, error;

	/* Only resolve the interlock's class if we are expected to drop it. */
	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	/* PCATCH in pri requests an interruptible (signal-catching) sleep. */
	catch = (pri) ? (pri & PCATCH) : 0;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	DROP_GIANT();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decisional switch for real sleeping: pick the sleepq wait variant
	 * matching the timeout/catch combination requested above.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	PICKUP_GIANT();
	/* LK_SLEEPFAIL turns even a clean sleep into a failed request. */
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
172177957Sattilio
/*
 * Release one shared reference on the lock and, when the last sharer goes
 * away, wake up sleeping waiters.  Exclusive waiters are preferred over
 * shared ones so that writers are not starved.
 */
static __inline void
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			/* Preserve the shared-waiters flag for a later wakeup. */
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		/*
		 * The CAS can race with a new waiter flagging itself; on
		 * failure release the chain lock and retry from the top.
		 */
		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
}
244177957Sattilio
/*
 * Lock class lc_assert hook: generic lock-class assertions are not
 * supported for lockmgr locks (the real checks are in _lockmgr_assert()).
 */
static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
251173733Sattilio
/*
 * Lock class lc_lock hook: lockmgr locks cannot be used as sleep(9)
 * interlocks, so this must never be reached.
 */
static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}
258167368Sjhb
/*
 * Lock class lc_unlock hook: lockmgr locks cannot be used as sleep(9)
 * interlocks, so this must never be reached.
 */
static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
265167368Sjhb
266177957Sattiliovoid
267177957Sattiliolockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
268177957Sattilio{
269177957Sattilio	int iflags;
27029653Sdyson
271177957Sattilio	MPASS((flags & ~LK_INIT_MASK) == 0);
27224269Speter
273177957Sattilio	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
274177957Sattilio	if ((flags & LK_NODUP) == 0)
275177957Sattilio		iflags |= LO_DUPOK;
276177957Sattilio	if (flags & LK_NOPROFILE)
277177957Sattilio		iflags |= LO_NOPROFILE;
278177957Sattilio	if ((flags & LK_NOWITNESS) == 0)
279177957Sattilio		iflags |= LO_WITNESS;
280177957Sattilio	if (flags & LK_QUIET)
281177957Sattilio		iflags |= LO_QUIET;
282177957Sattilio	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);
283177957Sattilio
284177957Sattilio	lk->lk_lock = LK_UNLOCKED;
285177957Sattilio	lk->lk_recurse = 0;
286177957Sattilio	lk->lk_timo = timo;
287177957Sattilio	lk->lk_pri = pri;
288177957Sattilio	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
289177957Sattilio	STACK_ZERO(lk);
29028345Sdyson}
29124269Speter
/*
 * Tear down a lock previously set up with lockinit().  The lock must be
 * fully released and unrecursed before it can be destroyed.
 */
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}
30028345Sdyson
301177957Sattilioint
302177957Sattilio__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
303177957Sattilio    const char *wmesg, int pri, int timo, const char *file, int line)
304140711Sjeff{
305177957Sattilio	uint64_t waittime;
306177957Sattilio	struct lock_class *class;
307176320Sattilio	const char *iwmesg;
308177957Sattilio	uintptr_t tid, v, x;
309177957Sattilio	u_int op;
310177957Sattilio	int contested, error, ipri, itimo, queue;
311176320Sattilio
312177957Sattilio	contested = 0;
313177957Sattilio	error = 0;
314177957Sattilio	waittime = 0;
315177957Sattilio	tid = (uintptr_t)curthread;
316177957Sattilio	op = (flags & LK_TYPE_MASK);
317177957Sattilio	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
318177957Sattilio	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
319177957Sattilio	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
320176320Sattilio
321177957Sattilio	MPASS((flags & ~LK_TOTAL_MASK) == 0);
322178150Sattilio	KASSERT((op & (op - 1)) == 0,
323178150Sattilio	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
324177957Sattilio	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
325177957Sattilio	    (op != LK_DOWNGRADE && op != LK_RELEASE),
326177957Sattilio	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
327177957Sattilio	    __func__, file, line));
328177957Sattilio	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
329177957Sattilio	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
330177957Sattilio	    __func__, file, line));
33166615Sjasone
332177957Sattilio	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
333177957Sattilio	if (panicstr != NULL) {
334177957Sattilio		if (flags & LK_INTERLOCK)
335177957Sattilio			class->lc_unlock(ilk);
336177957Sattilio		return (0);
33728345Sdyson	}
33828345Sdyson
339177957Sattilio	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
340177957Sattilio		op = LK_EXCLUSIVE;
341164159Skmacy
342177957Sattilio	switch (op) {
343177957Sattilio	case LK_SHARED:
344177957Sattilio		for (;;) {
345177957Sattilio			x = lk->lk_lock;
346174948Sattilio
347177957Sattilio			/*
348177957Sattilio			 * If no other thread has an exclusive lock, or
349177957Sattilio			 * no exclusive waiter is present, bump the count of
350177957Sattilio			 * sharers.  Since we have to preserve the state of
351177957Sattilio			 * waiters, if we fail to acquire the shared lock
352177957Sattilio			 * loop back and retry.
353177957Sattilio			 */
354177957Sattilio			if (LK_CAN_SHARE(x)) {
355177957Sattilio				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
356177957Sattilio				    x + LK_ONE_SHARER))
357177957Sattilio					break;
358177957Sattilio				continue;
359177957Sattilio			}
360177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
361177957Sattilio			    &contested, &waittime);
36228345Sdyson
363177957Sattilio			/*
			 * If the lock is already held by curthread in
365177957Sattilio			 * exclusive way avoid a deadlock.
366177957Sattilio			 */
367177957Sattilio			if (LK_HOLDER(x) == tid) {
368177957Sattilio				LOCK_LOG2(lk,
369177957Sattilio				    "%s: %p alredy held in exclusive mode",
370177957Sattilio				    __func__, lk);
371177957Sattilio				error = EDEADLK;
372177957Sattilio				break;
373177957Sattilio			}
374140711Sjeff
375177957Sattilio			/*
376177957Sattilio			 * If the lock is expected to not sleep just give up
377177957Sattilio			 * and return.
378177957Sattilio			 */
379177957Sattilio			if (LK_TRYOP(flags)) {
380177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
381177957Sattilio				    __func__, lk);
382177957Sattilio				error = EBUSY;
383177957Sattilio				break;
384177957Sattilio			}
38528345Sdyson
386177957Sattilio			/*
387177957Sattilio			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
389177957Sattilio			 */
390177957Sattilio			sleepq_lock(&lk->lock_object);
391177957Sattilio			x = lk->lk_lock;
392111463Sjeff
393177957Sattilio			/*
394177957Sattilio			 * if the lock can be acquired in shared mode, try
395177957Sattilio			 * again.
396177957Sattilio			 */
397177957Sattilio			if (LK_CAN_SHARE(x)) {
398177957Sattilio				sleepq_release(&lk->lock_object);
399177957Sattilio				continue;
400177957Sattilio			}
40124269Speter
402177957Sattilio			/*
403177957Sattilio			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
404177957Sattilio			 * loop back and retry.
405177957Sattilio			 */
406177957Sattilio			if ((x & LK_SHARED_WAITERS) == 0) {
407177957Sattilio				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
408177957Sattilio				    x | LK_SHARED_WAITERS)) {
409177957Sattilio					sleepq_release(&lk->lock_object);
410177957Sattilio					continue;
411177957Sattilio				}
412177957Sattilio				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
413177957Sattilio				    __func__, lk);
414177957Sattilio			}
41524269Speter
416177957Sattilio			/*
417177957Sattilio			 * As far as we have been unable to acquire the
418177957Sattilio			 * shared lock and the shared waiters flag is set,
419177957Sattilio			 * we will sleep.
420177957Sattilio			 */
421177957Sattilio			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
422177957Sattilio			    SQ_SHARED_QUEUE);
423177957Sattilio			flags &= ~LK_INTERLOCK;
424177957Sattilio			if (error) {
425177957Sattilio				LOCK_LOG3(lk,
426177957Sattilio				    "%s: interrupted sleep for %p with %d",
427177957Sattilio				    __func__, lk, error);
428177957Sattilio				break;
429177957Sattilio			}
430177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
431177957Sattilio			    __func__, lk);
432177957Sattilio		}
433177957Sattilio		if (error == 0) {
434177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
435177957Sattilio			    contested, waittime, file, line);
436177957Sattilio			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
437176014Sattilio			    line);
438177957Sattilio			TD_LOCKS_INC(curthread);
439177957Sattilio			TD_SLOCKS_INC(curthread);
440177957Sattilio			STACK_SAVE(lk);
441177957Sattilio		}
442177957Sattilio		break;
443177957Sattilio	case LK_UPGRADE:
444177957Sattilio		_lockmgr_assert(lk, KA_SLOCKED, file, line);
445177957Sattilio		x = lk->lk_lock & LK_ALL_WAITERS;
446177957Sattilio
44744681Sjulian		/*
448177957Sattilio		 * Try to switch from one shared lock to an exclusive one.
449177957Sattilio		 * We need to preserve waiters flags during the operation.
45044681Sjulian		 */
451177957Sattilio		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
452177957Sattilio		    tid | x)) {
453177957Sattilio			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
454177957Sattilio			    line);
455177957Sattilio			TD_SLOCKS_DEC(curthread);
45624269Speter			break;
45724269Speter		}
458177957Sattilio
45924269Speter		/*
460177957Sattilio		 * We have been unable to succeed in upgrading, so just
461177957Sattilio		 * give up the shared lock.
46224269Speter		 */
463177957Sattilio		wakeupshlk(lk, file, line);
46424269Speter
465177957Sattilio		/* FALLTHROUGH */
466177957Sattilio	case LK_EXCLUSIVE:
46724269Speter
46824269Speter		/*
		 * If curthread already holds the lock and this one is
470177957Sattilio		 * allowed to recurse, simply recurse on it.
47124269Speter		 */
472177957Sattilio		if (lockmgr_xlocked(lk)) {
473177957Sattilio			if ((flags & LK_CANRECURSE) == 0 &&
474177957Sattilio			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {
475177957Sattilio
476177957Sattilio				/*
477177957Sattilio				 * If the lock is expected to not panic just
478177957Sattilio				 * give up and return.
479177957Sattilio				 */
480177957Sattilio				if (LK_TRYOP(flags)) {
481177957Sattilio					LOCK_LOG2(lk,
482177957Sattilio					    "%s: %p fails the try operation",
483177957Sattilio					    __func__, lk);
484177957Sattilio					error = EBUSY;
485177957Sattilio					break;
486177957Sattilio				}
487177957Sattilio				if (flags & LK_INTERLOCK)
488177957Sattilio					class->lc_unlock(ilk);
489177957Sattilio		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
490177957Sattilio				    __func__, iwmesg, file, line);
491177957Sattilio			}
492177957Sattilio			lk->lk_recurse++;
493177957Sattilio			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
494177957Sattilio			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
495177957Sattilio			    lk->lk_recurse, file, line);
496177957Sattilio			TD_LOCKS_INC(curthread);
49724269Speter			break;
49824269Speter		}
499177957Sattilio
500177957Sattilio		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
501177957Sattilio		    tid)) {
502177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
503177957Sattilio			    &contested, &waittime);
504177957Sattilio
50524269Speter			/*
506177957Sattilio			 * If the lock is expected to not sleep just give up
507177957Sattilio			 * and return.
50824269Speter			 */
509177957Sattilio			if (LK_TRYOP(flags)) {
510177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
511177957Sattilio				    __func__, lk);
512177957Sattilio				error = EBUSY;
513177957Sattilio				break;
514177957Sattilio			}
51534194Sdyson
516177957Sattilio			/*
517177957Sattilio			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
519177957Sattilio			 */
520177957Sattilio			sleepq_lock(&lk->lock_object);
521177957Sattilio			x = lk->lk_lock;
522177957Sattilio			v = x & LK_ALL_WAITERS;
523177957Sattilio
524177957Sattilio			/*
525177957Sattilio			 * if the lock has been released while we spun on
526177957Sattilio			 * the sleepqueue chain lock just try again.
527177957Sattilio			 */
528177957Sattilio			if (x == LK_UNLOCKED) {
529177957Sattilio				sleepq_release(&lk->lock_object);
530177957Sattilio				continue;
531134365Skan			}
53224269Speter
53324269Speter			/*
534177957Sattilio			 * The lock can be in the state where there is a
535177957Sattilio			 * pending queue of waiters, but still no owner.
536177957Sattilio			 * This happens when the lock is contested and an
537177957Sattilio			 * owner is going to claim the lock.
538177957Sattilio			 * If curthread is the one successfully acquiring it
539177957Sattilio			 * claim lock ownership and return, preserving waiters
540177957Sattilio			 * flags.
54124269Speter			 */
542177957Sattilio			if (x == (LK_UNLOCKED | v)) {
543177957Sattilio				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
544177957Sattilio				    tid | v)) {
545177957Sattilio					sleepq_release(&lk->lock_object);
546177957Sattilio					LOCK_LOG2(lk,
547177957Sattilio					    "%s: %p claimed by a new writer",
548177957Sattilio					    __func__, lk);
549177957Sattilio					break;
550177957Sattilio				}
551177957Sattilio				sleepq_release(&lk->lock_object);
552177957Sattilio				continue;
553177957Sattilio			}
554177957Sattilio
555177957Sattilio			/*
556177957Sattilio			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
557177957Sattilio			 * fail, loop back and retry.
558177957Sattilio			 */
559177957Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
560177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
561177957Sattilio				    x | LK_EXCLUSIVE_WAITERS)) {
562177957Sattilio					sleepq_release(&lk->lock_object);
563177957Sattilio					continue;
564177957Sattilio				}
565177957Sattilio				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
566177957Sattilio				    __func__, lk);
567177957Sattilio			}
568177957Sattilio
569177957Sattilio			/*
570177957Sattilio			 * As far as we have been unable to acquire the
571177957Sattilio			 * exclusive lock and the exclusive waiters flag
572177957Sattilio			 * is set, we will sleep.
573177957Sattilio			 */
574177957Sattilio			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
575177957Sattilio			    SQ_EXCLUSIVE_QUEUE);
576177957Sattilio			flags &= ~LK_INTERLOCK;
577177957Sattilio			if (error) {
578177957Sattilio				LOCK_LOG3(lk,
579177957Sattilio				    "%s: interrupted sleep for %p with %d",
580177957Sattilio				    __func__, lk, error);
58148301Smckusick				break;
58248301Smckusick			}
583177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
584177957Sattilio			    __func__, lk);
58524269Speter		}
586177957Sattilio		if (error == 0) {
587177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
588177957Sattilio			    contested, waittime, file, line);
589177957Sattilio			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
590177957Sattilio			    lk->lk_recurse, file, line);
591177957Sattilio			TD_LOCKS_INC(curthread);
592177957Sattilio			STACK_SAVE(lk);
593177957Sattilio		}
594177957Sattilio		break;
595177957Sattilio	case LK_DOWNGRADE:
596177957Sattilio		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
597177957Sattilio
59824269Speter		/*
599177957Sattilio		 * In order to preserve waiters flags, just spin.
60024269Speter		 */
601177957Sattilio		for (;;) {
602177957Sattilio			x = lk->lk_lock & LK_ALL_WAITERS;
603177957Sattilio			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
604177957Sattilio			    LK_SHARERS_LOCK(1) | x)) {
605177957Sattilio				LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object,
606177957Sattilio				    0, 0, file, line);
607177957Sattilio				TD_SLOCKS_INC(curthread);
608177957Sattilio				break;
609177957Sattilio			}
610177957Sattilio			cpu_spinwait();
61124269Speter		}
61224269Speter		break;
613177957Sattilio	case LK_RELEASE:
614177957Sattilio		_lockmgr_assert(lk, KA_LOCKED, file, line);
615177957Sattilio		x = lk->lk_lock;
61624269Speter
617177957Sattilio		if ((x & LK_SHARE) == 0) {
618177957Sattilio
619177957Sattilio			/*
			 * As first option, treat the lock as if it had
			 * no waiters.
622177957Sattilio			 * Fix-up the tid var if the lock has been disowned.
623177957Sattilio			 */
624177957Sattilio			if (LK_HOLDER(x) == LK_KERNPROC)
625177957Sattilio				tid = LK_KERNPROC;
626177957Sattilio			else
627177957Sattilio				TD_LOCKS_DEC(curthread);
628177957Sattilio			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
629177957Sattilio			    lk->lk_recurse, file, line);
630177957Sattilio
631177957Sattilio			/*
632177957Sattilio			 * The lock is held in exclusive mode.
633177957Sattilio			 * If the lock is recursed also, then unrecurse it.
634177957Sattilio			 */
635177957Sattilio			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
636177957Sattilio				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
637177957Sattilio				    lk);
638177957Sattilio				lk->lk_recurse--;
639177957Sattilio				break;
640176014Sattilio			}
641177957Sattilio			lock_profile_release_lock(&lk->lock_object);
642177957Sattilio
643177957Sattilio			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
644177957Sattilio			    LK_UNLOCKED))
645177957Sattilio				break;
646177957Sattilio
647177957Sattilio			sleepq_lock(&lk->lock_object);
648177957Sattilio			x = lk->lk_lock & LK_ALL_WAITERS;
649177957Sattilio			v = LK_UNLOCKED;
650177957Sattilio
651177957Sattilio			/*
652177957Sattilio		 	 * If the lock has exclusive waiters, give them
653177957Sattilio			 * preference in order to avoid deadlock with
654177957Sattilio			 * shared runners up.
655177957Sattilio			 */
656177957Sattilio			if (x & LK_EXCLUSIVE_WAITERS) {
657177957Sattilio				queue = SQ_EXCLUSIVE_QUEUE;
658177957Sattilio				v |= (x & LK_SHARED_WAITERS);
659177957Sattilio			} else {
660177957Sattilio				MPASS(x == LK_SHARED_WAITERS);
661177957Sattilio				queue = SQ_SHARED_QUEUE;
66224269Speter			}
663149723Sssouhlal
664177957Sattilio			LOCK_LOG3(lk,
665177957Sattilio			    "%s: %p waking up threads on the %s queue",
666177957Sattilio			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
667177957Sattilio			    "exclusive");
668177957Sattilio			atomic_store_rel_ptr(&lk->lk_lock, v);
669177957Sattilio			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
670177957Sattilio			sleepq_release(&lk->lock_object);
671177957Sattilio			break;
672177957Sattilio		} else
673177957Sattilio			wakeupshlk(lk, file, line);
67424269Speter		break;
675177957Sattilio	case LK_DRAIN:
67624269Speter
67724269Speter		/*
		 * Trying to drain a lock we already own will result in a
679177957Sattilio		 * deadlock.
68024269Speter		 */
681177957Sattilio		if (lockmgr_xlocked(lk)) {
682177957Sattilio			if (flags & LK_INTERLOCK)
683177957Sattilio				class->lc_unlock(ilk);
684177957Sattilio			panic("%s: draining %s with the lock held @ %s:%d\n",
685177957Sattilio			    __func__, iwmesg, file, line);
686177957Sattilio		}
68728345Sdyson
688177957Sattilio		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
689177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
690177957Sattilio			    &contested, &waittime);
69124269Speter
692177957Sattilio			/*
693177957Sattilio			 * If the lock is expected to not sleep just give up
694177957Sattilio			 * and return.
695177957Sattilio			 */
696177957Sattilio			if (LK_TRYOP(flags)) {
697177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
698177957Sattilio				    __func__, lk);
699177957Sattilio				error = EBUSY;
700177957Sattilio				break;
701177957Sattilio			}
70224269Speter
703177957Sattilio			/*
704177957Sattilio			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
706177957Sattilio			 */
707177957Sattilio			sleepq_lock(&lk->lock_object);
708177957Sattilio			x = lk->lk_lock;
709177957Sattilio			v = x & LK_ALL_WAITERS;
71029653Sdyson
711177957Sattilio			/*
712177957Sattilio			 * if the lock has been released while we spun on
713177957Sattilio			 * the sleepqueue chain lock just try again.
714177957Sattilio			 */
715177957Sattilio			if (x == LK_UNLOCKED) {
716177957Sattilio				sleepq_release(&lk->lock_object);
717177957Sattilio				continue;
718177957Sattilio			}
719176320Sattilio
720177957Sattilio			if (x == (LK_UNLOCKED | v)) {
721177957Sattilio				v = x;
722177957Sattilio				if (v & LK_EXCLUSIVE_WAITERS) {
723177957Sattilio					queue = SQ_EXCLUSIVE_QUEUE;
724177957Sattilio					v &= ~LK_EXCLUSIVE_WAITERS;
725177957Sattilio				} else {
726177957Sattilio					MPASS(v & LK_SHARED_WAITERS);
727177957Sattilio					queue = SQ_SHARED_QUEUE;
728177957Sattilio					v &= ~LK_SHARED_WAITERS;
729177957Sattilio				}
730177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
731177957Sattilio					sleepq_release(&lk->lock_object);
732177957Sattilio					continue;
733177957Sattilio				}
734177957Sattilio				LOCK_LOG3(lk,
735177957Sattilio				"%s: %p waking up all threads on the %s queue",
736177957Sattilio				    __func__, lk, queue == SQ_SHARED_QUEUE ?
737177957Sattilio				    "shared" : "exclusive");
738177957Sattilio				sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
739177957Sattilio				    0, queue);
740177957Sattilio
741177957Sattilio				/*
742177957Sattilio				 * If shared waiters have been woken up we need
743177957Sattilio				 * to wait for one of them to acquire the lock
744177957Sattilio				 * before to set the exclusive waiters in
745177957Sattilio				 * order to avoid a deadlock.
746177957Sattilio				 */
747177957Sattilio				if (queue == SQ_SHARED_QUEUE) {
748177957Sattilio					for (v = lk->lk_lock;
749177957Sattilio					    (v & LK_SHARE) && !LK_SHARERS(v);
750177957Sattilio					    v = lk->lk_lock)
751177957Sattilio						cpu_spinwait();
752177957Sattilio				}
753177957Sattilio			}
754177957Sattilio
755177957Sattilio			/*
756177957Sattilio			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
757177957Sattilio			 * fail, loop back and retry.
758177957Sattilio			 */
759177957Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
760177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
761177957Sattilio				    x | LK_EXCLUSIVE_WAITERS)) {
762177957Sattilio					sleepq_release(&lk->lock_object);
763177957Sattilio					continue;
764177957Sattilio				}
765177957Sattilio				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
766177957Sattilio				    __func__, lk);
767177957Sattilio			}
768177957Sattilio
769177957Sattilio			/*
770177957Sattilio			 * As long as we have been unable to acquire the
771177957Sattilio			 * exclusive lock and the exclusive waiters flag
772177957Sattilio			 * is set, we will sleep.
773177957Sattilio			 */
774177957Sattilio			if (flags & LK_INTERLOCK) {
775177957Sattilio				class->lc_unlock(ilk);
776177957Sattilio				flags &= ~LK_INTERLOCK;
777177957Sattilio			}
778177957Sattilio			DROP_GIANT();
779177957Sattilio			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
780177957Sattilio			    SQ_EXCLUSIVE_QUEUE);
781177957Sattilio			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
782177957Sattilio			PICKUP_GIANT();
783177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
784177957Sattilio			    __func__, lk);
78529653Sdyson		}
786177957Sattilio
787177957Sattilio		if (error == 0) {
788177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
789177957Sattilio			    contested, waittime, file, line);
790177957Sattilio			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
791177957Sattilio			    lk->lk_recurse, file, line);
792177957Sattilio			TD_LOCKS_INC(curthread);
793177957Sattilio			STACK_SAVE(lk);
794177957Sattilio		}
795177957Sattilio		break;
796177957Sattilio	default:
797177957Sattilio		if (flags & LK_INTERLOCK)
798177957Sattilio			class->lc_unlock(ilk);
799177957Sattilio		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
80029653Sdyson	}
801177957Sattilio
802177957Sattilio	if (flags & LK_INTERLOCK)
803177957Sattilio		class->lc_unlock(ilk);
804177957Sattilio
805177957Sattilio	return (error);
80629653Sdyson}
80729653Sdyson
/*
 * Disown an exclusively-held lockmgr lock: transfer ownership from the
 * current thread to the anonymous LK_KERNPROC holder, preserving any
 * waiters bits in the lock word.  The caller must hold the lock
 * exclusively and not recursively (asserted below).
 */
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 * (The KA_XLOCKED assertion above also accepts a disowned lock, so
	 * the holder here is either curthread or LK_KERNPROC.)
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;

	/*
	 * In order to preserve waiters flags, just spin: re-sample the
	 * waiters mask and retry the CAS until it succeeds.
	 */
	for (;;) {
		x = lk->lk_lock & LK_ALL_WAITERS;
		if (atomic_cmpset_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x)) {
			LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file,
			    line);
			TD_LOCKS_DEC(curthread);
			return;
		}
		cpu_spinwait();
	}
}
83766615Sjasone
/*
 * Print a human-readable description of a lockmgr lock's state:
 * UNLOCKED, SHARED (with share count) or EXCL (with owning thread),
 * followed by any pending waiters flags and the saved stack trace.
 */
void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf(" lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		/*
		 * NOTE(review): td is dereferenced unconditionally here;
		 * presumably callers guarantee a stable exclusive owner
		 * (not LK_KERNPROC) while printing -- confirm.
		 */
		printf(" lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	/* Re-sample the lock word once to report the waiters flags. */
	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");

	STACK_PRINT(lk);
}
864175166Sattilio
86529653Sdysonint
866177957Sattiliolockstatus(struct lock *lk)
86729653Sdyson{
868177957Sattilio	uintptr_t v, x;
869177957Sattilio	int ret;
87029653Sdyson
871177957Sattilio	ret = LK_SHARED;
872177957Sattilio	x = lk->lk_lock;
873177957Sattilio	v = LK_HOLDER(x);
874175635Sattilio
875177957Sattilio	if ((x & LK_SHARE) == 0) {
876177957Sattilio		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
877177957Sattilio			ret = LK_EXCLUSIVE;
87854444Seivind		else
879177957Sattilio			ret = LK_EXCLOTHER;
880177957Sattilio	} else if (x == LK_UNLOCKED)
881177957Sattilio		ret = 0;
88229653Sdyson
883177957Sattilio	return (ret);
88424269Speter}
885161322Sjhb
886176249Sattilio#ifdef INVARIANT_SUPPORT
887176249Sattilio#ifndef INVARIANTS
888177957Sattilio#undef	_lockmgr_assert
889176249Sattilio#endif
890176249Sattilio
/*
 * Assertion backend for lockmgr locks (INVARIANTS): verify that the lock
 * state matches "what" (KA_LOCKED/KA_SLOCKED/KA_XLOCKED/KA_UNLOCKED,
 * optionally combined with KA_RECURSED or KA_NOTRECURSED) and panic with
 * the caller's file/line on mismatch.  Skipped entirely after a panic
 * (panicstr set), so checks cannot fire on torn state.
 */
void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH: the KA_LOCKED code below honors slocked. */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
		/*
		 * Fail if the lock is unlocked, or held exclusively when a
		 * shared hold was required, or held exclusively by neither
		 * curthread nor LK_KERNPROC (disowned).
		 */
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		/* Recursion can only be checked on the exclusive path. */
		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		/* Accept either our own exclusive hold or a disowned lock. */
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		/* Must not be exclusively held (or disowned) in this context. */
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
948177957Sattilio#endif
949176249Sattilio
950161322Sjhb#ifdef DDB
951161337Sjhbint
952161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp)
953161337Sjhb{
954177957Sattilio	struct lock *lk;
955161337Sjhb
956177957Sattilio	lk = td->td_wchan;
957161337Sjhb
958177957Sattilio	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
959177957Sattilio		return (0);
960177957Sattilio	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
961177957Sattilio	if (lk->lk_lock & LK_SHARE)
962177957Sattilio		db_printf("SHARED (count %ju)\n",
963177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
964177957Sattilio	else
965177957Sattilio		db_printf("EXCL\n");
966177957Sattilio	*ownerp = lockmgr_xholder(lk);
967161337Sjhb
968161337Sjhb	return (1);
969161337Sjhb}
970161337Sjhb
971177957Sattiliostatic void
972164246Skmacydb_show_lockmgr(struct lock_object *lock)
973161322Sjhb{
974161322Sjhb	struct thread *td;
975177957Sattilio	struct lock *lk;
976161322Sjhb
977177957Sattilio	lk = (struct lock *)lock;
978161322Sjhb
979168070Sjhb	db_printf(" state: ");
980177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
981161322Sjhb		db_printf("UNLOCKED\n");
982177957Sattilio	else if (lk->lk_lock & LK_SHARE)
983177957Sattilio		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
984177957Sattilio	else {
985177957Sattilio		td = lockmgr_xholder(lk);
986177957Sattilio		if (td == (struct thread *)LK_KERNPROC)
987177957Sattilio			db_printf("XLOCK: LK_KERNPROC\n");
988177957Sattilio		else
989177957Sattilio			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
990177957Sattilio			    td->td_tid, td->td_proc->p_pid,
991177957Sattilio			    td->td_proc->p_comm);
992177957Sattilio		if (lockmgr_recursed(lk))
993177957Sattilio			db_printf(" recursed: %d\n", lk->lk_recurse);
994177957Sattilio	}
995177957Sattilio	db_printf(" waiters: ");
996177957Sattilio	switch (lk->lk_lock & LK_ALL_WAITERS) {
997177957Sattilio	case LK_SHARED_WAITERS:
998177957Sattilio		db_printf("shared\n");
999177957Sattilio	case LK_EXCLUSIVE_WAITERS:
1000177957Sattilio		db_printf("exclusive\n");
1001177957Sattilio		break;
1002177957Sattilio	case LK_ALL_WAITERS:
1003177957Sattilio		db_printf("shared and exclusive\n");
1004177957Sattilio		break;
1005177957Sattilio	default:
1006177957Sattilio		db_printf("none\n");
1007177957Sattilio	}
1008161322Sjhb}
1009161322Sjhb#endif
1010