/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 211531 2010-08-20 19:46:50Z jhb $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

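/*
 * Threads contending a lockmgr lock sleep on one of two sleepqueue(9)
 * queues, one for prospective exclusive owners and one for prospective
 * sharers, so that each class of waiters can be awakened separately.
 */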
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

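/*
 * The GIANT_*() macros let a thread completely drop Giant (remembering its
 * recursion depth) before blocking on a lockmgr lock, and transparently
 * reacquire it once the lock operation is done, so that Giant is never
 * held across a lockmgr sleep.
 */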
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

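/*
 * A shared request can be granted only while LK_SHARE is set (that is, no
 * exclusive owner is present) and, normally, only if no exclusive waiter
 * or spinner is queued.  A thread that already holds shared lockmgr locks,
 * or that runs with TDP_DEADLKTREAT set, may bypass the waiters check in
 * order to avoid deadlock, at the price of some writer starvation.
 */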
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

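/*
 * The whole lock state is packed into the single word lk_lock: it holds
 * either LK_SHARE plus a count of sharers, or the pointer of the owning
 * thread (LK_KERNPROC once the lock is disowned), in both cases ORed with
 * the waiters and spinners flag bits.
 */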
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	 assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	 db_show_lockmgr(struct lock_object *lock);
#endif
static void	 lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
#endif
static int	 unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

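/*
 * Adaptive spinning is compiled in only with "options ADAPTIVE_LOCKMGRS"
 * and applies just to locks initialized with LK_ADAPTIVE.  alk_retries
 * bounds how many times a thread re-enters the shared spinning loop and
 * alk_loops bounds the iterations of each spin.
 */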
#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function is called with the sleepqueue chain lock held (via
 * sleepq_lock()) and returns with it released.  It also assumes that the
 * generic interlock is sane and has been previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which sleep primitive to use, honoring the requested
	 * timeout and signal-catching semantics.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

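/*
 * Release one shared lockmgr count and, once the last sharer is gone, hand
 * the lock over to the queued waiters.  The return value reports whether
 * the caller must kick proc0 (the swapper) via kick_proc0().
 */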
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and clearing the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be considered an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * them up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

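/*
 * A minimal life-cycle sketch (illustrative only: it assumes the lockmgr()
 * convenience macro from <sys/lockmgr.h>, which supplies the default
 * wmesg, priority and timeout together with the caller's file/line):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */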
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

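/*
 * Generic lockmgr entry point; callers normally reach it through the
 * lockmgr() and lockmgr_args() wrappers.  When LK_INTERLOCK is set, the
 * interlock ilk is always released before sleeping or returning, which
 * permits the classic race-free handoff (a sketch, with "ilk" being a
 * hypothetical mutex guarding the state being examined):
 *
 *	mtx_lock(&ilk);
 *	... inspect state protected by ilk ...
 *	lockmgr(&lk, LK_EXCLUSIVE | LK_INTERLOCK, &ilk);
 */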
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle two states here
			 * because, for a failed acquisition, the lock can be
			 * either held in exclusive mode or in shared mode
			 * (due to the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters
			 * flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve the waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

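		/*
		 * Note that a failed upgrade is not atomic: at this point
		 * the thread holds the lock in no mode at all while it
		 * falls through to contend for exclusive ownership, so the
		 * caller must revalidate any state the shared lock was
		 * protecting.
		 */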
		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the lock is expected not to panic just
				 * give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters
			 * flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving the
			 * waiters flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve the waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and clearing the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be considered an upper
			 * bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean them up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
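	/*
	 * LK_DRAIN grants exclusive ownership only once no other thread
	 * holds the lock and no waiter bits are left on the lock word;
	 * it is typically the last acquisition performed on a lock that
	 * is about to be decommissioned through lockdestroy().
	 */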
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters
			 * flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and clearing the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * be lying about the real number of waiters
				 * with the LK_SLEEPFAIL flag on because they
				 * may be used in conjunction with
				 * interruptible sleeps, so lk_exslpfail
				 * should be considered an upper bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean them up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters flag in
				 * order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve the waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

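/*
 * Report the lock status as seen by the calling thread: LK_EXCLUSIVE if
 * curthread owns it (or it has been disowned to LK_KERNPROC), LK_EXCLOTHER
 * if some other thread does, LK_SHARED if it is share-locked and 0 if it
 * is held by nobody.
 */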
int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif