kern_lock.c revision 177982
1139804Simp/*-
2177957Sattilio * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3177957Sattilio * All rights reserved.
424269Speter *
524269Speter * Redistribution and use in source and binary forms, with or without
624269Speter * modification, are permitted provided that the following conditions
724269Speter * are met:
824269Speter * 1. Redistributions of source code must retain the above copyright
9177957Sattilio *    notice(s), this list of conditions and the following disclaimer as
10177957Sattilio *    the first lines of this file unmodified other than the possible
11177957Sattilio *    addition of one or more copyright notices.
1224269Speter * 2. Redistributions in binary form must reproduce the above copyright
13177957Sattilio *    notice(s), this list of conditions and the following disclaimer in the
1424269Speter *    documentation and/or other materials provided with the distribution.
1524269Speter *
16177957Sattilio * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17177957Sattilio * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18177957Sattilio * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19177957Sattilio * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20177957Sattilio * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21177957Sattilio * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22177957Sattilio * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23177957Sattilio * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2424269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25177957Sattilio * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26177957Sattilio * DAMAGE.
2724269Speter */
2824269Speter
29177957Sattilio#include "opt_ddb.h"
30177957Sattilio
31116182Sobrien#include <sys/cdefs.h>
32116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 177982 2008-04-07 14:46:38Z attilio $");
33116182Sobrien
3424269Speter#include <sys/param.h>
3584812Sjhb#include <sys/ktr.h>
3624269Speter#include <sys/lock.h>
37177957Sattilio#include <sys/lock_profile.h>
38102477Sbde#include <sys/lockmgr.h>
3967353Sjhb#include <sys/mutex.h>
40102477Sbde#include <sys/proc.h>
41177957Sattilio#include <sys/sleepqueue.h>
42148668Sjeff#ifdef DEBUG_LOCKS
43148668Sjeff#include <sys/stack.h>
44148668Sjeff#endif
45177957Sattilio#include <sys/systm.h>
4624269Speter
47177957Sattilio#include <machine/cpu.h>
48176014Sattilio
49161322Sjhb#ifdef DDB
50161322Sjhb#include <ddb/ddb.h>
51161322Sjhb#endif
52161322Sjhb
53177957SattilioCTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
54177957Sattilio    (LK_CANRECURSE | LK_NOSHARE));
55177957Sattilio
56177957Sattilio#define	SQ_EXCLUSIVE_QUEUE	0
57177957Sattilio#define	SQ_SHARED_QUEUE		1
58177957Sattilio
59177957Sattilio#ifndef INVARIANTS
60177957Sattilio#define	_lockmgr_assert(lk, what, file, line)
61177957Sattilio#define	TD_LOCKS_INC(td)
62177957Sattilio#define	TD_LOCKS_DEC(td)
63177957Sattilio#else
64177957Sattilio#define	TD_LOCKS_INC(td)	((td)->td_locks++)
65177957Sattilio#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
66177957Sattilio#endif
67177957Sattilio#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
68177957Sattilio#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
69177957Sattilio
70177957Sattilio#ifndef DEBUG_LOCKS
71177957Sattilio#define	STACK_PRINT(lk)
72177957Sattilio#define	STACK_SAVE(lk)
73177957Sattilio#define	STACK_ZERO(lk)
74177957Sattilio#else
75177957Sattilio#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
76177957Sattilio#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
77177957Sattilio#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
78177957Sattilio#endif
79177957Sattilio
80177957Sattilio#define	LOCK_LOG2(lk, string, arg1, arg2)				\
81177957Sattilio	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
82177957Sattilio		CTR2(KTR_LOCK, (string), (arg1), (arg2))
83177957Sattilio#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
84177957Sattilio	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
85177957Sattilio		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
86177957Sattilio
87177957Sattilio#define	LK_TRYOP(x)							\
88177957Sattilio	((x) & LK_NOWAIT)
89177957Sattilio#define	LK_CAN_SHARE(x)							\
90177957Sattilio	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
91177982Sattilio	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
92177957Sattilio
93177957Sattilio#define	lockmgr_disowned(lk)						\
94177957Sattilio	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
95177957Sattilio
96177957Sattilio#define	lockmgr_xlocked(lk)						\
97177957Sattilio	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
98177957Sattilio
99177957Sattiliostatic void	 assert_lockmgr(struct lock_object *lock, int how);
100177957Sattilio#ifdef DDB
101177957Sattiliostatic void	 db_show_lockmgr(struct lock_object *lock);
102177957Sattilio#endif
103177957Sattiliostatic void	 lock_lockmgr(struct lock_object *lock, int how);
104177957Sattiliostatic int	 unlock_lockmgr(struct lock_object *lock);
105177957Sattilio
/*
 * Lock class descriptor hooking lockmgr into the generic lock(9)
 * framework.  The lc_lock/lc_unlock hooks panic unconditionally (see
 * below) because lockmgr locks do not support sleep interlocking.
 */
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr
};
116164246Skmacy
117177957Sattiliostatic __inline struct thread *
118177957Sattiliolockmgr_xholder(struct lock *lk)
119177957Sattilio{
120177957Sattilio	uintptr_t x;
121176249Sattilio
122177957Sattilio	x = lk->lk_lock;
123177957Sattilio	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
124177957Sattilio}
125177957Sattilio
/*
 * Put curthread to sleep on the lock's sleep queue, on the requested
 * queue (SQ_EXCLUSIVE_QUEUE or SQ_SHARED_QUEUE).
 *
 * It assumes sleepq_lock held and returns with this one unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 *
 * Returns 0 for a normal wakeup, an error from the sleepqueue wait
 * primitives otherwise; if LK_SLEEPFAIL was requested and the sleep
 * itself succeeded, ENOLCK is returned instead.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	struct lock_class *class;
	int catch, error;

	/* The interlock class is only needed when we must drop it. */
	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	/* PCATCH embedded in pri requests an interruptible sleep. */
	catch = (pri) ? (pri & PCATCH) : 0;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	DROP_GIANT();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decisional switch for real sleeping: pick the sleepqueue wait
	 * variant matching the timeout/catch combination requested.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	PICKUP_GIANT();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
172177957Sattilio
/*
 * Release one shared reference held by curthread and, when the last
 * shared reference is dropped and waiters are queued, wake them up.
 * Exclusive waiters are preferred over shared ones (see below).
 */
static __inline void
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.  Any
		 * shared waiters bit is preserved in the new lock value so
		 * that those threads get their turn later.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		/* Lost a race with a state change: drop the chain lock, retry. */
		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
}
244177957Sattilio
/*
 * lock_class assertion hook.  lockmgr provides its own assertion
 * interface (_lockmgr_assert()), so the generic one must not be used.
 */
static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
251173733Sattilio
/*
 * lock_class hook for acquiring the lock as a sleep interlock.
 * Unsupported for lockmgr locks, hence the unconditional panic.
 */
static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}
258167368Sjhb
/*
 * lock_class hook for releasing the lock around a sleep.
 * Unsupported for lockmgr locks, hence the unconditional panic.
 */
static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
265167368Sjhb
266177957Sattiliovoid
267177957Sattiliolockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
268177957Sattilio{
269177957Sattilio	int iflags;
27029653Sdyson
271177957Sattilio	MPASS((flags & ~LK_INIT_MASK) == 0);
27224269Speter
273177957Sattilio	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
274177957Sattilio	if ((flags & LK_NODUP) == 0)
275177957Sattilio		iflags |= LO_DUPOK;
276177957Sattilio	if (flags & LK_NOPROFILE)
277177957Sattilio		iflags |= LO_NOPROFILE;
278177957Sattilio	if ((flags & LK_NOWITNESS) == 0)
279177957Sattilio		iflags |= LO_WITNESS;
280177957Sattilio	if (flags & LK_QUIET)
281177957Sattilio		iflags |= LO_QUIET;
282177957Sattilio	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);
283177957Sattilio
284177957Sattilio	lk->lk_lock = LK_UNLOCKED;
285177957Sattilio	lk->lk_recurse = 0;
286177957Sattilio	lk->lk_timo = timo;
287177957Sattilio	lk->lk_pri = pri;
288177957Sattilio	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
289177957Sattilio	STACK_ZERO(lk);
29028345Sdyson}
29124269Speter
/*
 * Tear down a lockmgr lock previously set up with lockinit().
 * The lock must be unheld and not recursed at this point.
 */
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}
30028345Sdyson
301177957Sattilioint
302177957Sattilio__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
303177957Sattilio    const char *wmesg, int pri, int timo, const char *file, int line)
304140711Sjeff{
305177957Sattilio	uint64_t waittime;
306177957Sattilio	struct lock_class *class;
307176320Sattilio	const char *iwmesg;
308177957Sattilio	uintptr_t tid, v, x;
309177957Sattilio	u_int op;
310177957Sattilio	int contested, error, ipri, itimo, queue;
311176320Sattilio
312177957Sattilio	contested = 0;
313177957Sattilio	error = 0;
314177957Sattilio	waittime = 0;
315177957Sattilio	tid = (uintptr_t)curthread;
316177957Sattilio	op = (flags & LK_TYPE_MASK);
317177957Sattilio	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
318177957Sattilio	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
319177957Sattilio	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
320176320Sattilio
321177957Sattilio	MPASS((flags & ~LK_TOTAL_MASK) == 0);
322177957Sattilio	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
323177957Sattilio	    (op != LK_DOWNGRADE && op != LK_RELEASE),
324177957Sattilio	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
325177957Sattilio	    __func__, file, line));
326177957Sattilio	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
327177957Sattilio	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
328177957Sattilio	    __func__, file, line));
32966615Sjasone
330177957Sattilio	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
331177957Sattilio	if (panicstr != NULL) {
332177957Sattilio		if (flags & LK_INTERLOCK)
333177957Sattilio			class->lc_unlock(ilk);
334177957Sattilio		return (0);
33528345Sdyson	}
33628345Sdyson
337177957Sattilio	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
338177957Sattilio		op = LK_EXCLUSIVE;
339164159Skmacy
340177957Sattilio	switch (op) {
341177957Sattilio	case LK_SHARED:
342177957Sattilio		for (;;) {
343177957Sattilio			x = lk->lk_lock;
344174948Sattilio
345177957Sattilio			/*
346177957Sattilio			 * If no other thread has an exclusive lock, or
347177957Sattilio			 * no exclusive waiter is present, bump the count of
348177957Sattilio			 * sharers.  Since we have to preserve the state of
349177957Sattilio			 * waiters, if we fail to acquire the shared lock
350177957Sattilio			 * loop back and retry.
351177957Sattilio			 */
352177957Sattilio			if (LK_CAN_SHARE(x)) {
353177957Sattilio				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
354177957Sattilio				    x + LK_ONE_SHARER))
355177957Sattilio					break;
356177957Sattilio				continue;
357177957Sattilio			}
358177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
359177957Sattilio			    &contested, &waittime);
36028345Sdyson
361177957Sattilio			/*
362177957Sattilio			 * If the lock is already held by curthread in
363177957Sattilio			 * exclusive way avoid a deadlock.
364177957Sattilio			 */
365177957Sattilio			if (LK_HOLDER(x) == tid) {
366177957Sattilio				LOCK_LOG2(lk,
367177957Sattilio				    "%s: %p alredy held in exclusive mode",
368177957Sattilio				    __func__, lk);
369177957Sattilio				error = EDEADLK;
370177957Sattilio				break;
371177957Sattilio			}
372140711Sjeff
373177957Sattilio			/*
374177957Sattilio			 * If the lock is expected to not sleep just give up
375177957Sattilio			 * and return.
376177957Sattilio			 */
377177957Sattilio			if (LK_TRYOP(flags)) {
378177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
379177957Sattilio				    __func__, lk);
380177957Sattilio				error = EBUSY;
381177957Sattilio				break;
382177957Sattilio			}
38328345Sdyson
384177957Sattilio			/*
385177957Sattilio			 * Acquire the sleepqueue chain lock because we
386177957Sattilio			 * probably will need to manipulate waiters flags.
387177957Sattilio			 */
388177957Sattilio			sleepq_lock(&lk->lock_object);
389177957Sattilio			x = lk->lk_lock;
390111463Sjeff
391177957Sattilio			/*
392177957Sattilio			 * if the lock can be acquired in shared mode, try
393177957Sattilio			 * again.
394177957Sattilio			 */
395177957Sattilio			if (LK_CAN_SHARE(x)) {
396177957Sattilio				sleepq_release(&lk->lock_object);
397177957Sattilio				continue;
398177957Sattilio			}
39924269Speter
400177957Sattilio			/*
401177957Sattilio			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
402177957Sattilio			 * loop back and retry.
403177957Sattilio			 */
404177957Sattilio			if ((x & LK_SHARED_WAITERS) == 0) {
405177957Sattilio				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
406177957Sattilio				    x | LK_SHARED_WAITERS)) {
407177957Sattilio					sleepq_release(&lk->lock_object);
408177957Sattilio					continue;
409177957Sattilio				}
410177957Sattilio				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
411177957Sattilio				    __func__, lk);
412177957Sattilio			}
41324269Speter
414177957Sattilio			/*
415177957Sattilio			 * Since we have been unable to acquire the
416177957Sattilio			 * shared lock and the shared waiters flag is set,
417177957Sattilio			 * we will sleep.
418177957Sattilio			 */
419177957Sattilio			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
420177957Sattilio			    SQ_SHARED_QUEUE);
421177957Sattilio			flags &= ~LK_INTERLOCK;
422177957Sattilio			if (error) {
423177957Sattilio				LOCK_LOG3(lk,
424177957Sattilio				    "%s: interrupted sleep for %p with %d",
425177957Sattilio				    __func__, lk, error);
426177957Sattilio				break;
427177957Sattilio			}
428177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
429177957Sattilio			    __func__, lk);
430177957Sattilio		}
431177957Sattilio		if (error == 0) {
432177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
433177957Sattilio			    contested, waittime, file, line);
434177957Sattilio			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
435176014Sattilio			    line);
436177957Sattilio			TD_LOCKS_INC(curthread);
437177957Sattilio			TD_SLOCKS_INC(curthread);
438177957Sattilio			STACK_SAVE(lk);
439177957Sattilio		}
440177957Sattilio		break;
441177957Sattilio	case LK_UPGRADE:
442177957Sattilio		_lockmgr_assert(lk, KA_SLOCKED, file, line);
443177957Sattilio		x = lk->lk_lock & LK_ALL_WAITERS;
444177957Sattilio
44544681Sjulian		/*
446177957Sattilio		 * Try to switch from one shared lock to an exclusive one.
447177957Sattilio		 * We need to preserve waiters flags during the operation.
44844681Sjulian		 */
449177957Sattilio		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
450177957Sattilio		    tid | x)) {
451177957Sattilio			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
452177957Sattilio			    line);
453177957Sattilio			TD_SLOCKS_DEC(curthread);
45424269Speter			break;
45524269Speter		}
456177957Sattilio
45724269Speter		/*
458177957Sattilio		 * We have been unable to succeed in upgrading, so just
459177957Sattilio		 * give up the shared lock.
46024269Speter		 */
461177957Sattilio		wakeupshlk(lk, file, line);
46224269Speter
463177957Sattilio		/* FALLTHROUGH */
464177957Sattilio	case LK_EXCLUSIVE:
46524269Speter
46624269Speter		/*
467177957Sattilio		 * If curthread already holds the lock and this one is
468177957Sattilio		 * allowed to recurse, simply recurse on it.
46924269Speter		 */
470177957Sattilio		if (lockmgr_xlocked(lk)) {
471177957Sattilio			if ((flags & LK_CANRECURSE) == 0 &&
472177957Sattilio			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {
473177957Sattilio
474177957Sattilio				/*
475177957Sattilio				 * If the lock is expected to not panic just
476177957Sattilio				 * give up and return.
477177957Sattilio				 */
478177957Sattilio				if (LK_TRYOP(flags)) {
479177957Sattilio					LOCK_LOG2(lk,
480177957Sattilio					    "%s: %p fails the try operation",
481177957Sattilio					    __func__, lk);
482177957Sattilio					error = EBUSY;
483177957Sattilio					break;
484177957Sattilio				}
485177957Sattilio				if (flags & LK_INTERLOCK)
486177957Sattilio					class->lc_unlock(ilk);
487177957Sattilio		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
488177957Sattilio				    __func__, iwmesg, file, line);
489177957Sattilio			}
490177957Sattilio			lk->lk_recurse++;
491177957Sattilio			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
492177957Sattilio			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
493177957Sattilio			    lk->lk_recurse, file, line);
494177957Sattilio			TD_LOCKS_INC(curthread);
49524269Speter			break;
49624269Speter		}
497177957Sattilio
498177957Sattilio		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
499177957Sattilio		    tid)) {
500177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
501177957Sattilio			    &contested, &waittime);
502177957Sattilio
50324269Speter			/*
504177957Sattilio			 * If the lock is expected to not sleep just give up
505177957Sattilio			 * and return.
50624269Speter			 */
507177957Sattilio			if (LK_TRYOP(flags)) {
508177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
509177957Sattilio				    __func__, lk);
510177957Sattilio				error = EBUSY;
511177957Sattilio				break;
512177957Sattilio			}
51334194Sdyson
514177957Sattilio			/*
515177957Sattilio			 * Acquire the sleepqueue chain lock because we
516177957Sattilio			 * probably will need to manipulate waiters flags.
517177957Sattilio			 */
518177957Sattilio			sleepq_lock(&lk->lock_object);
519177957Sattilio			x = lk->lk_lock;
520177957Sattilio			v = x & LK_ALL_WAITERS;
521177957Sattilio
522177957Sattilio			/*
523177957Sattilio			 * if the lock has been released while we spun on
524177957Sattilio			 * the sleepqueue chain lock just try again.
525177957Sattilio			 */
526177957Sattilio			if (x == LK_UNLOCKED) {
527177957Sattilio				sleepq_release(&lk->lock_object);
528177957Sattilio				continue;
529134365Skan			}
53024269Speter
53124269Speter			/*
532177957Sattilio			 * The lock can be in the state where there is a
533177957Sattilio			 * pending queue of waiters, but still no owner.
534177957Sattilio			 * This happens when the lock is contested and an
535177957Sattilio			 * owner is going to claim the lock.
536177957Sattilio			 * If curthread is the one successfully acquiring it
537177957Sattilio			 * claim lock ownership and return, preserving waiters
538177957Sattilio			 * flags.
53924269Speter			 */
540177957Sattilio			if (x == (LK_UNLOCKED | v)) {
541177957Sattilio				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
542177957Sattilio				    tid | v)) {
543177957Sattilio					sleepq_release(&lk->lock_object);
544177957Sattilio					LOCK_LOG2(lk,
545177957Sattilio					    "%s: %p claimed by a new writer",
546177957Sattilio					    __func__, lk);
547177957Sattilio					break;
548177957Sattilio				}
549177957Sattilio				sleepq_release(&lk->lock_object);
550177957Sattilio				continue;
551177957Sattilio			}
552177957Sattilio
553177957Sattilio			/*
554177957Sattilio			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
555177957Sattilio			 * fail, loop back and retry.
556177957Sattilio			 */
557177957Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
558177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
559177957Sattilio				    x | LK_EXCLUSIVE_WAITERS)) {
560177957Sattilio					sleepq_release(&lk->lock_object);
561177957Sattilio					continue;
562177957Sattilio				}
563177957Sattilio				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
564177957Sattilio				    __func__, lk);
565177957Sattilio			}
566177957Sattilio
567177957Sattilio			/*
568177957Sattilio			 * Since we have been unable to acquire the
569177957Sattilio			 * exclusive lock and the exclusive waiters flag
570177957Sattilio			 * is set, we will sleep.
571177957Sattilio			 */
572177957Sattilio			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
573177957Sattilio			    SQ_EXCLUSIVE_QUEUE);
574177957Sattilio			flags &= ~LK_INTERLOCK;
575177957Sattilio			if (error) {
576177957Sattilio				LOCK_LOG3(lk,
577177957Sattilio				    "%s: interrupted sleep for %p with %d",
578177957Sattilio				    __func__, lk, error);
57948301Smckusick				break;
58048301Smckusick			}
581177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
582177957Sattilio			    __func__, lk);
58324269Speter		}
584177957Sattilio		if (error == 0) {
585177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
586177957Sattilio			    contested, waittime, file, line);
587177957Sattilio			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
588177957Sattilio			    lk->lk_recurse, file, line);
589177957Sattilio			TD_LOCKS_INC(curthread);
590177957Sattilio			STACK_SAVE(lk);
591177957Sattilio		}
592177957Sattilio		break;
593177957Sattilio	case LK_DOWNGRADE:
594177957Sattilio		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
595177957Sattilio
59624269Speter		/*
597177957Sattilio		 * In order to preserve waiters flags, just spin.
59824269Speter		 */
599177957Sattilio		for (;;) {
600177957Sattilio			x = lk->lk_lock & LK_ALL_WAITERS;
601177957Sattilio			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
602177957Sattilio			    LK_SHARERS_LOCK(1) | x)) {
603177957Sattilio				LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object,
604177957Sattilio				    0, 0, file, line);
605177957Sattilio				TD_SLOCKS_INC(curthread);
606177957Sattilio				break;
607177957Sattilio			}
608177957Sattilio			cpu_spinwait();
60924269Speter		}
61024269Speter		break;
611177957Sattilio	case LK_RELEASE:
612177957Sattilio		_lockmgr_assert(lk, KA_LOCKED, file, line);
613177957Sattilio		x = lk->lk_lock;
61424269Speter
615177957Sattilio		if ((x & LK_SHARE) == 0) {
616177957Sattilio
617177957Sattilio			/*
618177957Sattilio			 * As first option, treat the lock as if it had no
619177957Sattilio			 * waiters.
620177957Sattilio			 * Fix-up the tid var if the lock has been disowned.
621177957Sattilio			 */
622177957Sattilio			if (LK_HOLDER(x) == LK_KERNPROC)
623177957Sattilio				tid = LK_KERNPROC;
624177957Sattilio			else
625177957Sattilio				TD_LOCKS_DEC(curthread);
626177957Sattilio			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
627177957Sattilio			    lk->lk_recurse, file, line);
628177957Sattilio
629177957Sattilio			/*
630177957Sattilio			 * The lock is held in exclusive mode.
631177957Sattilio			 * If the lock is recursed also, then unrecurse it.
632177957Sattilio			 */
633177957Sattilio			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
634177957Sattilio				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
635177957Sattilio				    lk);
636177957Sattilio				lk->lk_recurse--;
637177957Sattilio				break;
638176014Sattilio			}
639177957Sattilio			lock_profile_release_lock(&lk->lock_object);
640177957Sattilio
641177957Sattilio			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
642177957Sattilio			    LK_UNLOCKED))
643177957Sattilio				break;
644177957Sattilio
645177957Sattilio			sleepq_lock(&lk->lock_object);
646177957Sattilio			x = lk->lk_lock & LK_ALL_WAITERS;
647177957Sattilio			v = LK_UNLOCKED;
648177957Sattilio
649177957Sattilio			/*
650177957Sattilio		 	 * If the lock has exclusive waiters, give them
651177957Sattilio			 * preference in order to avoid deadlock with
652177957Sattilio			 * shared runners up.
653177957Sattilio			 */
654177957Sattilio			if (x & LK_EXCLUSIVE_WAITERS) {
655177957Sattilio				queue = SQ_EXCLUSIVE_QUEUE;
656177957Sattilio				v |= (x & LK_SHARED_WAITERS);
657177957Sattilio			} else {
658177957Sattilio				MPASS(x == LK_SHARED_WAITERS);
659177957Sattilio				queue = SQ_SHARED_QUEUE;
66024269Speter			}
661149723Sssouhlal
662177957Sattilio			LOCK_LOG3(lk,
663177957Sattilio			    "%s: %p waking up threads on the %s queue",
664177957Sattilio			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
665177957Sattilio			    "exclusive");
666177957Sattilio			atomic_store_rel_ptr(&lk->lk_lock, v);
667177957Sattilio			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
668177957Sattilio			sleepq_release(&lk->lock_object);
669177957Sattilio			break;
670177957Sattilio		} else
671177957Sattilio			wakeupshlk(lk, file, line);
67224269Speter		break;
673177957Sattilio	case LK_DRAIN:
67424269Speter
67524269Speter		/*
676177957Sattilio		 * Trying to drain a lock we already own will result in a
677177957Sattilio		 * deadlock.
67824269Speter		 */
679177957Sattilio		if (lockmgr_xlocked(lk)) {
680177957Sattilio			if (flags & LK_INTERLOCK)
681177957Sattilio				class->lc_unlock(ilk);
682177957Sattilio			panic("%s: draining %s with the lock held @ %s:%d\n",
683177957Sattilio			    __func__, iwmesg, file, line);
684177957Sattilio		}
68528345Sdyson
686177957Sattilio		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
687177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
688177957Sattilio			    &contested, &waittime);
68924269Speter
690177957Sattilio			/*
691177957Sattilio			 * If the lock is expected to not sleep just give up
692177957Sattilio			 * and return.
693177957Sattilio			 */
694177957Sattilio			if (LK_TRYOP(flags)) {
695177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
696177957Sattilio				    __func__, lk);
697177957Sattilio				error = EBUSY;
698177957Sattilio				break;
699177957Sattilio			}
70024269Speter
701177957Sattilio			/*
702177957Sattilio			 * Acquire the sleepqueue chain lock because we
703177957Sattilio			 * probably will need to manipulate waiters flags.
704177957Sattilio			 */
705177957Sattilio			sleepq_lock(&lk->lock_object);
706177957Sattilio			x = lk->lk_lock;
707177957Sattilio			v = x & LK_ALL_WAITERS;
70829653Sdyson
709177957Sattilio			/*
710177957Sattilio			 * if the lock has been released while we spun on
711177957Sattilio			 * the sleepqueue chain lock just try again.
712177957Sattilio			 */
713177957Sattilio			if (x == LK_UNLOCKED) {
714177957Sattilio				sleepq_release(&lk->lock_object);
715177957Sattilio				continue;
716177957Sattilio			}
717176320Sattilio
718177957Sattilio			if (x == (LK_UNLOCKED | v)) {
719177957Sattilio				v = x;
720177957Sattilio				if (v & LK_EXCLUSIVE_WAITERS) {
721177957Sattilio					queue = SQ_EXCLUSIVE_QUEUE;
722177957Sattilio					v &= ~LK_EXCLUSIVE_WAITERS;
723177957Sattilio				} else {
724177957Sattilio					MPASS(v & LK_SHARED_WAITERS);
725177957Sattilio					queue = SQ_SHARED_QUEUE;
726177957Sattilio					v &= ~LK_SHARED_WAITERS;
727177957Sattilio				}
728177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
729177957Sattilio					sleepq_release(&lk->lock_object);
730177957Sattilio					continue;
731177957Sattilio				}
732177957Sattilio				LOCK_LOG3(lk,
733177957Sattilio				"%s: %p waking up all threads on the %s queue",
734177957Sattilio				    __func__, lk, queue == SQ_SHARED_QUEUE ?
735177957Sattilio				    "shared" : "exclusive");
736177957Sattilio				sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
737177957Sattilio				    0, queue);
738177957Sattilio
739177957Sattilio				/*
740177957Sattilio				 * If shared waiters have been woken up we need
741177957Sattilio				 * to wait for one of them to acquire the lock
742177957Sattilio				 * before to set the exclusive waiters in
743177957Sattilio				 * order to avoid a deadlock.
744177957Sattilio				 */
745177957Sattilio				if (queue == SQ_SHARED_QUEUE) {
746177957Sattilio					for (v = lk->lk_lock;
747177957Sattilio					    (v & LK_SHARE) && !LK_SHARERS(v);
748177957Sattilio					    v = lk->lk_lock)
749177957Sattilio						cpu_spinwait();
750177957Sattilio				}
751177957Sattilio			}
752177957Sattilio
753177957Sattilio			/*
754177957Sattilio			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
755177957Sattilio			 * fail, loop back and retry.
756177957Sattilio			 */
757177957Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
758177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
759177957Sattilio				    x | LK_EXCLUSIVE_WAITERS)) {
760177957Sattilio					sleepq_release(&lk->lock_object);
761177957Sattilio					continue;
762177957Sattilio				}
763177957Sattilio				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
764177957Sattilio				    __func__, lk);
765177957Sattilio			}
766177957Sattilio
767177957Sattilio			/*
768177957Sattilio			 * As far as we have been unable to acquire the
769177957Sattilio			 * exclusive lock and the exclusive waiters flag
770177957Sattilio			 * is set, we will sleep.
771177957Sattilio			 */
772177957Sattilio			if (flags & LK_INTERLOCK) {
773177957Sattilio				class->lc_unlock(ilk);
774177957Sattilio				flags &= ~LK_INTERLOCK;
775177957Sattilio			}
776177957Sattilio			DROP_GIANT();
777177957Sattilio			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
778177957Sattilio			    SQ_EXCLUSIVE_QUEUE);
779177957Sattilio			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
780177957Sattilio			PICKUP_GIANT();
781177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
782177957Sattilio			    __func__, lk);
78329653Sdyson		}
784177957Sattilio
785177957Sattilio		if (error == 0) {
786177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
787177957Sattilio			    contested, waittime, file, line);
788177957Sattilio			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
789177957Sattilio			    lk->lk_recurse, file, line);
790177957Sattilio			TD_LOCKS_INC(curthread);
791177957Sattilio			STACK_SAVE(lk);
792177957Sattilio		}
793177957Sattilio		break;
794177957Sattilio	default:
795177957Sattilio		if (flags & LK_INTERLOCK)
796177957Sattilio			class->lc_unlock(ilk);
797177957Sattilio		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
79829653Sdyson	}
799177957Sattilio
800177957Sattilio	/*
801177957Sattilio	 * We could have exited from the switch without reacquiring the
802177957Sattilio	 * interlock, so we need to check for the interlock ownership.
803177957Sattilio	 */
804177957Sattilio	if (flags & LK_INTERLOCK)
805177957Sattilio		class->lc_unlock(ilk);
806177957Sattilio
807177957Sattilio	return (error);
80829653Sdyson}
80929653Sdyson
81029653Sdysonvoid
811177957Sattilio_lockmgr_disown(struct lock *lk, const char *file, int line)
81229653Sdyson{
813177957Sattilio	uintptr_t tid, x;
814176014Sattilio
815177957Sattilio	tid = (uintptr_t)curthread;
816177957Sattilio	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
81729653Sdyson
818177957Sattilio	/*
819177957Sattilio	 * If the owner is alredy LK_KERNPROC just skip the whole operation.
820177957Sattilio	 */
821177957Sattilio	if (LK_HOLDER(lk->lk_lock) != tid)
822177957Sattilio		return;
82329653Sdyson
824177957Sattilio	/*
825177957Sattilio	 * In order to preserve waiters flags, just spin.
826177957Sattilio	 */
827177957Sattilio	for (;;) {
828177957Sattilio		x = lk->lk_lock & LK_ALL_WAITERS;
829177957Sattilio		if (atomic_cmpset_ptr(&lk->lk_lock, tid | x,
830177957Sattilio		    LK_KERNPROC | x)) {
831177957Sattilio			LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file,
832177957Sattilio			    line);
833177957Sattilio			TD_LOCKS_DEC(curthread);
834177957Sattilio			return;
835177957Sattilio		}
836177957Sattilio		cpu_spinwait();
837177957Sattilio	}
83866615Sjasone}
83966615Sjasone
840175166Sattiliovoid
841177957Sattiliolockmgr_printinfo(struct lock *lk)
842175166Sattilio{
843175166Sattilio	struct thread *td;
844177957Sattilio	uintptr_t x;
845175166Sattilio
846177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
847177957Sattilio		printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
848177957Sattilio	else if (lk->lk_lock & LK_SHARE)
849177957Sattilio		printf(" lock type %s: SHARED (count %ju)\n",
850177957Sattilio		    lk->lock_object.lo_name,
851177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
852177957Sattilio	else {
853177957Sattilio		td = lockmgr_xholder(lk);
854177957Sattilio		printf(" lock type %s: EXCL by thread %p (pid %d)\n",
855177957Sattilio		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
856177957Sattilio	}
857175166Sattilio
858177957Sattilio	x = lk->lk_lock;
859177957Sattilio	if (x & LK_EXCLUSIVE_WAITERS)
860177957Sattilio		printf(" with exclusive waiters pending\n");
861177957Sattilio	if (x & LK_SHARED_WAITERS)
862177957Sattilio		printf(" with shared waiters pending\n");
863177957Sattilio
864177957Sattilio	STACK_PRINT(lk);
865175166Sattilio}
866175166Sattilio
86729653Sdysonint
868177957Sattiliolockstatus(struct lock *lk)
86929653Sdyson{
870177957Sattilio	uintptr_t v, x;
871177957Sattilio	int ret;
87229653Sdyson
873177957Sattilio	ret = LK_SHARED;
874177957Sattilio	x = lk->lk_lock;
875177957Sattilio	v = LK_HOLDER(x);
876175635Sattilio
877177957Sattilio	if ((x & LK_SHARE) == 0) {
878177957Sattilio		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
879177957Sattilio			ret = LK_EXCLUSIVE;
88054444Seivind		else
881177957Sattilio			ret = LK_EXCLOTHER;
882177957Sattilio	} else if (x == LK_UNLOCKED)
883177957Sattilio		ret = 0;
88429653Sdyson
885177957Sattilio	return (ret);
88624269Speter}
887161322Sjhb
888176249Sattilio#ifdef INVARIANT_SUPPORT
889176249Sattilio#ifndef INVARIANTS
890177957Sattilio#undef	_lockmgr_assert
891176249Sattilio#endif
892176249Sattilio
893176249Sattiliovoid
894177957Sattilio_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
895176249Sattilio{
896176249Sattilio	int slocked = 0;
897176249Sattilio
898176249Sattilio	if (panicstr != NULL)
899176249Sattilio		return;
900176249Sattilio	switch (what) {
901176249Sattilio	case KA_SLOCKED:
902176249Sattilio	case KA_SLOCKED | KA_NOTRECURSED:
903176249Sattilio	case KA_SLOCKED | KA_RECURSED:
904176249Sattilio		slocked = 1;
905176249Sattilio	case KA_LOCKED:
906176249Sattilio	case KA_LOCKED | KA_NOTRECURSED:
907176249Sattilio	case KA_LOCKED | KA_RECURSED:
908177957Sattilio		if (lk->lk_lock == LK_UNLOCKED ||
909177957Sattilio		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
910177957Sattilio		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
911176249Sattilio			panic("Lock %s not %slocked @ %s:%d\n",
912177957Sattilio			    lk->lock_object.lo_name, slocked ? "share" : "",
913176249Sattilio			    file, line);
914177957Sattilio
915177957Sattilio		if ((lk->lk_lock & LK_SHARE) == 0) {
916177957Sattilio			if (lockmgr_recursed(lk)) {
917176249Sattilio				if (what & KA_NOTRECURSED)
918176249Sattilio					panic("Lock %s recursed @ %s:%d\n",
919177957Sattilio					    lk->lock_object.lo_name, file,
920177957Sattilio					    line);
921176249Sattilio			} else if (what & KA_RECURSED)
922176249Sattilio				panic("Lock %s not recursed @ %s:%d\n",
923177957Sattilio				    lk->lock_object.lo_name, file, line);
924176249Sattilio		}
925176249Sattilio		break;
926176249Sattilio	case KA_XLOCKED:
927176249Sattilio	case KA_XLOCKED | KA_NOTRECURSED:
928176249Sattilio	case KA_XLOCKED | KA_RECURSED:
929177957Sattilio		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
930176249Sattilio			panic("Lock %s not exclusively locked @ %s:%d\n",
931177957Sattilio			    lk->lock_object.lo_name, file, line);
932177957Sattilio		if (lockmgr_recursed(lk)) {
933176249Sattilio			if (what & KA_NOTRECURSED)
934176249Sattilio				panic("Lock %s recursed @ %s:%d\n",
935177957Sattilio				    lk->lock_object.lo_name, file, line);
936176249Sattilio		} else if (what & KA_RECURSED)
937176249Sattilio			panic("Lock %s not recursed @ %s:%d\n",
938177957Sattilio			    lk->lock_object.lo_name, file, line);
939176249Sattilio		break;
940176249Sattilio	case KA_UNLOCKED:
941177957Sattilio		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
942176249Sattilio			panic("Lock %s exclusively locked @ %s:%d\n",
943177957Sattilio			    lk->lock_object.lo_name, file, line);
944176249Sattilio		break;
945176249Sattilio	default:
946177957Sattilio		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
947177957Sattilio		    line);
948176249Sattilio	}
949176249Sattilio}
950177957Sattilio#endif
951176249Sattilio
952161322Sjhb#ifdef DDB
953161337Sjhbint
954161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp)
955161337Sjhb{
956177957Sattilio	struct lock *lk;
957161337Sjhb
958177957Sattilio	lk = td->td_wchan;
959161337Sjhb
960177957Sattilio	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
961177957Sattilio		return (0);
962177957Sattilio	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
963177957Sattilio	if (lk->lk_lock & LK_SHARE)
964177957Sattilio		db_printf("SHARED (count %ju)\n",
965177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
966177957Sattilio	else
967177957Sattilio		db_printf("EXCL\n");
968177957Sattilio	*ownerp = lockmgr_xholder(lk);
969161337Sjhb
970161337Sjhb	return (1);
971161337Sjhb}
972161337Sjhb
973177957Sattiliostatic void
974164246Skmacydb_show_lockmgr(struct lock_object *lock)
975161322Sjhb{
976161322Sjhb	struct thread *td;
977177957Sattilio	struct lock *lk;
978161322Sjhb
979177957Sattilio	lk = (struct lock *)lock;
980161322Sjhb
981168070Sjhb	db_printf(" state: ");
982177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
983161322Sjhb		db_printf("UNLOCKED\n");
984177957Sattilio	else if (lk->lk_lock & LK_SHARE)
985177957Sattilio		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
986177957Sattilio	else {
987177957Sattilio		td = lockmgr_xholder(lk);
988177957Sattilio		if (td == (struct thread *)LK_KERNPROC)
989177957Sattilio			db_printf("XLOCK: LK_KERNPROC\n");
990177957Sattilio		else
991177957Sattilio			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
992177957Sattilio			    td->td_tid, td->td_proc->p_pid,
993177957Sattilio			    td->td_proc->p_comm);
994177957Sattilio		if (lockmgr_recursed(lk))
995177957Sattilio			db_printf(" recursed: %d\n", lk->lk_recurse);
996177957Sattilio	}
997177957Sattilio	db_printf(" waiters: ");
998177957Sattilio	switch (lk->lk_lock & LK_ALL_WAITERS) {
999177957Sattilio	case LK_SHARED_WAITERS:
1000177957Sattilio		db_printf("shared\n");
1001177957Sattilio	case LK_EXCLUSIVE_WAITERS:
1002177957Sattilio		db_printf("exclusive\n");
1003177957Sattilio		break;
1004177957Sattilio	case LK_ALL_WAITERS:
1005177957Sattilio		db_printf("shared and exclusive\n");
1006177957Sattilio		break;
1007177957Sattilio	default:
1008177957Sattilio		db_printf("none\n");
1009177957Sattilio	}
1010161322Sjhb}
1011161322Sjhb#endif
1012