kern_lock.c revision 243900
1139804Simp/*-
2177957Sattilio * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3177957Sattilio * All rights reserved.
424269Speter *
524269Speter * Redistribution and use in source and binary forms, with or without
624269Speter * modification, are permitted provided that the following conditions
724269Speter * are met:
824269Speter * 1. Redistributions of source code must retain the above copyright
9177957Sattilio *    notice(s), this list of conditions and the following disclaimer as
10177957Sattilio *    the first lines of this file unmodified other than the possible
11177957Sattilio *    addition of one or more copyright notices.
1224269Speter * 2. Redistributions in binary form must reproduce the above copyright
13177957Sattilio *    notice(s), this list of conditions and the following disclaimer in the
1424269Speter *    documentation and/or other materials provided with the distribution.
1524269Speter *
16177957Sattilio * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17177957Sattilio * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18177957Sattilio * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19177957Sattilio * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20177957Sattilio * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21177957Sattilio * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22177957Sattilio * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23177957Sattilio * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2424269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25177957Sattilio * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26177957Sattilio * DAMAGE.
2724269Speter */
2824269Speter
29194317Sattilio#include "opt_adaptive_lockmgrs.h"
30177957Sattilio#include "opt_ddb.h"
31233628Sfabient#include "opt_hwpmc_hooks.h"
32192853Ssson#include "opt_kdtrace.h"
33177957Sattilio
34116182Sobrien#include <sys/cdefs.h>
35116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 243900 2012-12-05 15:11:01Z attilio $");
36116182Sobrien
3724269Speter#include <sys/param.h>
3884812Sjhb#include <sys/ktr.h>
3924269Speter#include <sys/lock.h>
40177957Sattilio#include <sys/lock_profile.h>
41102477Sbde#include <sys/lockmgr.h>
4267353Sjhb#include <sys/mutex.h>
43102477Sbde#include <sys/proc.h>
44177957Sattilio#include <sys/sleepqueue.h>
45148668Sjeff#ifdef DEBUG_LOCKS
46148668Sjeff#include <sys/stack.h>
47148668Sjeff#endif
48194317Sattilio#include <sys/sysctl.h>
49177957Sattilio#include <sys/systm.h>
5024269Speter
51177957Sattilio#include <machine/cpu.h>
52176014Sattilio
53161322Sjhb#ifdef DDB
54161322Sjhb#include <ddb/ddb.h>
55161322Sjhb#endif
56161322Sjhb
57233628Sfabient#ifdef HWPMC_HOOKS
58233628Sfabient#include <sys/pmckern.h>
59233628SfabientPMC_SOFT_DECLARE( , , lock, failed);
60233628Sfabient#endif
61233628Sfabient
/*
 * The class-specific init flags must fit within the lo_flags bits
 * reserved for lock classes.
 */
CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
/* LK_UNLOCKED must not overlap any of the waiters/spinners bits. */
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

/* Indexes of the two sleepqueues attached to a lockmgr lock. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/* Per-thread lock accounting; compiled out without INVARIANTS. */
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

/* Stack trace capture for acquisitions, only with DEBUG_LOCKS. */
#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

/* KTR logging helpers, gated by the per-lock log test. */
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

/*
 * Helpers to fully drop Giant (recording its recursion depth in _i)
 * before sleeping and to reacquire it afterwards, preserving the
 * WITNESS state across the sleep.
 */
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

/*
 * True if a shared lock request may be granted for lock state (x).
 * NOTE(review): the OR between the LK_EXCLUSIVE_WAITERS and
 * LK_EXCLUSIVE_SPINNERS tests lets a shared request succeed when either
 * bit is clear; confirm this matches the intended anti-starvation
 * policy for exclusive requesters.
 */
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
/* True if the request is a non-sleeping "try" operation. */
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

/* WITNESS order checks are skipped for try ops and LK_NOWITNESS. */
#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

/* Adaptive spinning applies only to LK_ADAPTIVE locks w/o LK_SLEEPFAIL. */
#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

/* Ownership tests decoded from the embedded lock word. */
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
140227588Spjdstatic void	assert_lockmgr(const struct lock_object *lock, int how);
141177957Sattilio#ifdef DDB
142227588Spjdstatic void	db_show_lockmgr(const struct lock_object *lock);
143177957Sattilio#endif
144227588Spjdstatic void	lock_lockmgr(struct lock_object *lock, int how);
145192853Ssson#ifdef KDTRACE_HOOKS
146227588Spjdstatic int	owner_lockmgr(const struct lock_object *lock,
147227588Spjd		    struct thread **owner);
148192853Ssson#endif
149227588Spjdstatic int	unlock_lockmgr(struct lock_object *lock);
150177957Sattilio
/*
 * Lock class for lockmgr locks: sleepable, recursable and upgradable.
 * Several methods panic because lockmgr locks cannot be used as
 * sleep(9) interlocks (see the *_lockmgr() stubs below).
 */
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
164164246Skmacy
#ifdef ADAPTIVE_LOCKMGRS
/* Tunables for adaptive spinning, exported under debug.lockmgr.*. */
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif
173194317Sattilio
174177957Sattiliostatic __inline struct thread *
175227588Spjdlockmgr_xholder(const struct lock *lk)
176177957Sattilio{
177177957Sattilio	uintptr_t x;
178176249Sattilio
179177957Sattilio	x = lk->lk_lock;
180177957Sattilio	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
181177957Sattilio}
182177957Sattilio
18324269Speter/*
184177957Sattilio * It assumes sleepq_lock held and returns with this one unheld.
185177957Sattilio * It also assumes the generic interlock is sane and previously checked.
186177957Sattilio * If LK_INTERLOCK is specified the interlock is not reacquired after the
187177957Sattilio * sleep.
18824269Speter */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	/* The interlock class is needed only if we must drop it below. */
	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	/*
	 * Account exclusive sleepers that will fail with LK_SLEEPFAIL so
	 * the unlock paths can detect queues made only of such waiters.
	 */
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decisional switch for real sleeping: pick the sleepqueue wait
	 * flavor matching the timeout/catchable-signal combination.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	/* LK_SLEEPFAIL requests always fail once they have slept. */
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
232177957Sattilio
/*
 * Release one shared reference held by curthread, waking up waiters if
 * the last shared reference is dropped.  Returns non-zero if the
 * swapper process must be awakened (as reported by sleepq_broadcast()).
 */
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	/* This thread now holds one fewer (shared) lock. */
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are not waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty
		 * avoid a starvation for the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps so lk_exslpfail might be considered an 'upper limit'
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				/* At least one exclusive waiter can win. */
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				/*
				 * Every exclusive waiter would fail with
				 * LK_SLEEPFAIL anyway: wake them all up and
				 * hand the lock over to the shared queue.
				 */
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}
343177957Sattilio
/*
 * Lock class method: lockmgr provides no generic assertion support, so
 * any call through the class pointer is a bug.
 */
static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
350173733Sattilio
/* lockmgr locks may not be used as sleep(9) interlocks. */
static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}
357167368Sjhb
/* lockmgr locks may not be used as sleep(9) interlocks. */
static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
364167368Sjhb
#ifdef KDTRACE_HOOKS
/* No owner-query support for the lock class; must never be called. */
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif
373192853Ssson
/*
 * Initialize a lockmgr lock.  "pri" and "timo" become the defaults used
 * when a request passes LK_PRIO_DEFAULT/LK_TIMO_DEFAULT; "flags"
 * selects the lock options (recursion, witness, profiling, ...).
 */
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	/* The lock word must be properly aligned for atomic operations. */
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
            &lk->lk_lock));

	/* Translate the LK_* init flags into lock_object LO_* flags. */
	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	/* These two are carried verbatim as class-specific flags. */
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}
40524269Speter
406211531Sjhb/*
407211531Sjhb * XXX: Gross hacks to manipulate external lock flags after
408211531Sjhb * initialization.  Used for certain vnode and buf locks.
409211531Sjhb */
/* Re-enable shared acquisitions on a lock created with LK_NOSHARE. */
void
lockallowshare(struct lock *lk)
{

	/* Only the exclusive owner may safely flip the flag. */
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}
417211531Sjhb
/* Allow recursion on a lock initialized without LK_CANRECURSE. */
void
lockallowrecurse(struct lock *lk)
{

	/* Only the exclusive owner may safely flip the flag. */
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}
425211531Sjhb
/* Disallow further recursive acquisitions of the lock. */
void
lockdisablerecurse(struct lock *lk)
{

	/* Only the exclusive owner may safely flip the flag. */
	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
433211531Sjhb
/*
 * Tear down a lockmgr lock.  The lock must be unheld, not recursed and
 * have no LK_SLEEPFAIL exclusive sleepers still accounted.
 */
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
44328345Sdyson
444177957Sattilioint
445177957Sattilio__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
446177957Sattilio    const char *wmesg, int pri, int timo, const char *file, int line)
447140711Sjeff{
448178159Sattilio	GIANT_DECLARE;
449177957Sattilio	struct lock_class *class;
450176320Sattilio	const char *iwmesg;
451177957Sattilio	uintptr_t tid, v, x;
452200447Sattilio	u_int op, realexslp;
453189846Sjeff	int error, ipri, itimo, queue, wakeup_swapper;
454189846Sjeff#ifdef LOCK_PROFILING
455189846Sjeff	uint64_t waittime = 0;
456189846Sjeff	int contested = 0;
457189846Sjeff#endif
458194317Sattilio#ifdef ADAPTIVE_LOCKMGRS
459194317Sattilio	volatile struct thread *owner;
460194317Sattilio	u_int i, spintries = 0;
461194317Sattilio#endif
462176320Sattilio
463177957Sattilio	error = 0;
464177957Sattilio	tid = (uintptr_t)curthread;
465177957Sattilio	op = (flags & LK_TYPE_MASK);
466177957Sattilio	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
467177957Sattilio	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
468177957Sattilio	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
469176320Sattilio
470177957Sattilio	MPASS((flags & ~LK_TOTAL_MASK) == 0);
471178150Sattilio	KASSERT((op & (op - 1)) == 0,
472178150Sattilio	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
473177957Sattilio	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
474177957Sattilio	    (op != LK_DOWNGRADE && op != LK_RELEASE),
475177957Sattilio	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
476177957Sattilio	    __func__, file, line));
477177957Sattilio	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
478177957Sattilio	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
479177957Sattilio	    __func__, file, line));
480240424Sattilio	KASSERT(!TD_IS_IDLETHREAD(curthread),
481240424Sattilio	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
482240424Sattilio	    lk->lock_object.lo_name, file, line));
48366615Sjasone
484177957Sattilio	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
485177957Sattilio	if (panicstr != NULL) {
486177957Sattilio		if (flags & LK_INTERLOCK)
487177957Sattilio			class->lc_unlock(ilk);
488177957Sattilio		return (0);
48928345Sdyson	}
49028345Sdyson
491224581Skib	if (lk->lock_object.lo_flags & LK_NOSHARE) {
492224581Skib		switch (op) {
493224581Skib		case LK_SHARED:
494224581Skib			op = LK_EXCLUSIVE;
495224581Skib			break;
496224581Skib		case LK_UPGRADE:
497224581Skib		case LK_DOWNGRADE:
498224581Skib			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
499224581Skib			    file, line);
500224581Skib			return (0);
501224581Skib		}
502224581Skib	}
503164159Skmacy
504181334Sjhb	wakeup_swapper = 0;
505177957Sattilio	switch (op) {
506177957Sattilio	case LK_SHARED:
507178159Sattilio		if (LK_CAN_WITNESS(flags))
508178159Sattilio			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
509182914Sjhb			    file, line, ilk);
510177957Sattilio		for (;;) {
511177957Sattilio			x = lk->lk_lock;
512174948Sattilio
513177957Sattilio			/*
514177957Sattilio			 * If no other thread has an exclusive lock, or
515177957Sattilio			 * no exclusive waiter is present, bump the count of
516177957Sattilio			 * sharers.  Since we have to preserve the state of
517177957Sattilio			 * waiters, if we fail to acquire the shared lock
518177957Sattilio			 * loop back and retry.
519177957Sattilio			 */
520177957Sattilio			if (LK_CAN_SHARE(x)) {
521177957Sattilio				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
522177957Sattilio				    x + LK_ONE_SHARER))
523177957Sattilio					break;
524177957Sattilio				continue;
525177957Sattilio			}
526233628Sfabient#ifdef HWPMC_HOOKS
527233628Sfabient			PMC_SOFT_CALL( , , lock, failed);
528233628Sfabient#endif
529177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
530177957Sattilio			    &contested, &waittime);
53128345Sdyson
532177957Sattilio			/*
533180798Skib			 * If the lock is already held by curthread in
534177957Sattilio			 * exclusive way avoid a deadlock.
535177957Sattilio			 */
536177957Sattilio			if (LK_HOLDER(x) == tid) {
537177957Sattilio				LOCK_LOG2(lk,
538180798Skib				    "%s: %p already held in exclusive mode",
539177957Sattilio				    __func__, lk);
540177957Sattilio				error = EDEADLK;
541177957Sattilio				break;
542177957Sattilio			}
543140711Sjeff
544177957Sattilio			/*
545177957Sattilio			 * If the lock is expected to not sleep just give up
546177957Sattilio			 * and return.
547177957Sattilio			 */
548177957Sattilio			if (LK_TRYOP(flags)) {
549177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
550177957Sattilio				    __func__, lk);
551177957Sattilio				error = EBUSY;
552177957Sattilio				break;
553177957Sattilio			}
55428345Sdyson
555194317Sattilio#ifdef ADAPTIVE_LOCKMGRS
556177957Sattilio			/*
557194317Sattilio			 * If the owner is running on another CPU, spin until
558194317Sattilio			 * the owner stops running or the state of the lock
559196772Sattilio			 * changes.  We need a double-state handle here
560196772Sattilio			 * because for a failed acquisition the lock can be
561196772Sattilio			 * either held in exclusive mode or shared mode
562196772Sattilio			 * (for the writer starvation avoidance technique).
563194317Sattilio			 */
564194317Sattilio			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
565194317Sattilio			    LK_HOLDER(x) != LK_KERNPROC) {
566194317Sattilio				owner = (struct thread *)LK_HOLDER(x);
567194317Sattilio				if (LOCK_LOG_TEST(&lk->lock_object, 0))
568194317Sattilio					CTR3(KTR_LOCK,
569194317Sattilio					    "%s: spinning on %p held by %p",
570194317Sattilio					    __func__, lk, owner);
571194317Sattilio
572194317Sattilio				/*
573194317Sattilio				 * If we are holding also an interlock drop it
574194317Sattilio				 * in order to avoid a deadlock if the lockmgr
575194317Sattilio				 * owner is adaptively spinning on the
576194317Sattilio				 * interlock itself.
577194317Sattilio				 */
578194317Sattilio				if (flags & LK_INTERLOCK) {
579194317Sattilio					class->lc_unlock(ilk);
580194317Sattilio					flags &= ~LK_INTERLOCK;
581194317Sattilio				}
582194317Sattilio				GIANT_SAVE();
583194317Sattilio				while (LK_HOLDER(lk->lk_lock) ==
584194317Sattilio				    (uintptr_t)owner && TD_IS_RUNNING(owner))
585194317Sattilio					cpu_spinwait();
586196772Sattilio				GIANT_RESTORE();
587196772Sattilio				continue;
588194317Sattilio			} else if (LK_CAN_ADAPT(lk, flags) &&
589196772Sattilio			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
590194317Sattilio			    spintries < alk_retries) {
591194317Sattilio				if (flags & LK_INTERLOCK) {
592194317Sattilio					class->lc_unlock(ilk);
593194317Sattilio					flags &= ~LK_INTERLOCK;
594194317Sattilio				}
595194317Sattilio				GIANT_SAVE();
596194317Sattilio				spintries++;
597194317Sattilio				for (i = 0; i < alk_loops; i++) {
598194317Sattilio					if (LOCK_LOG_TEST(&lk->lock_object, 0))
599194317Sattilio						CTR4(KTR_LOCK,
600194317Sattilio				    "%s: shared spinning on %p with %u and %u",
601194317Sattilio						    __func__, lk, spintries, i);
602194317Sattilio					x = lk->lk_lock;
603194317Sattilio					if ((x & LK_SHARE) == 0 ||
604194317Sattilio					    LK_CAN_SHARE(x) != 0)
605194317Sattilio						break;
606194317Sattilio					cpu_spinwait();
607194317Sattilio				}
608196772Sattilio				GIANT_RESTORE();
609194317Sattilio				if (i != alk_loops)
610194317Sattilio					continue;
611194317Sattilio			}
612194317Sattilio#endif
613194317Sattilio
614194317Sattilio			/*
615177957Sattilio			 * Acquire the sleepqueue chain lock because we
616177957Sattilio			 * probabilly will need to manipulate waiters flags.
617177957Sattilio			 */
618177957Sattilio			sleepq_lock(&lk->lock_object);
619177957Sattilio			x = lk->lk_lock;
620111463Sjeff
621177957Sattilio			/*
622177957Sattilio			 * if the lock can be acquired in shared mode, try
623177957Sattilio			 * again.
624177957Sattilio			 */
625177957Sattilio			if (LK_CAN_SHARE(x)) {
626177957Sattilio				sleepq_release(&lk->lock_object);
627177957Sattilio				continue;
628177957Sattilio			}
62924269Speter
630194317Sattilio#ifdef ADAPTIVE_LOCKMGRS
631177957Sattilio			/*
632194317Sattilio			 * The current lock owner might have started executing
633194317Sattilio			 * on another CPU (or the lock could have changed
634194317Sattilio			 * owner) while we were waiting on the turnstile
635194317Sattilio			 * chain lock.  If so, drop the turnstile lock and try
636194317Sattilio			 * again.
637194317Sattilio			 */
638194317Sattilio			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
639194317Sattilio			    LK_HOLDER(x) != LK_KERNPROC) {
640194317Sattilio				owner = (struct thread *)LK_HOLDER(x);
641194317Sattilio				if (TD_IS_RUNNING(owner)) {
642194317Sattilio					sleepq_release(&lk->lock_object);
643194317Sattilio					continue;
644194317Sattilio				}
645194317Sattilio			}
646194317Sattilio#endif
647194317Sattilio
648194317Sattilio			/*
649177957Sattilio			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
650177957Sattilio			 * loop back and retry.
651177957Sattilio			 */
652177957Sattilio			if ((x & LK_SHARED_WAITERS) == 0) {
653177957Sattilio				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
654177957Sattilio				    x | LK_SHARED_WAITERS)) {
655177957Sattilio					sleepq_release(&lk->lock_object);
656177957Sattilio					continue;
657177957Sattilio				}
658177957Sattilio				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
659177957Sattilio				    __func__, lk);
660177957Sattilio			}
66124269Speter
662177957Sattilio			/*
663177957Sattilio			 * As far as we have been unable to acquire the
664177957Sattilio			 * shared lock and the shared waiters flag is set,
665177957Sattilio			 * we will sleep.
666177957Sattilio			 */
667177957Sattilio			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
668177957Sattilio			    SQ_SHARED_QUEUE);
669177957Sattilio			flags &= ~LK_INTERLOCK;
670177957Sattilio			if (error) {
671177957Sattilio				LOCK_LOG3(lk,
672177957Sattilio				    "%s: interrupted sleep for %p with %d",
673177957Sattilio				    __func__, lk, error);
674177957Sattilio				break;
675177957Sattilio			}
676177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
677177957Sattilio			    __func__, lk);
678177957Sattilio		}
679177957Sattilio		if (error == 0) {
680177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
681177957Sattilio			    contested, waittime, file, line);
682177957Sattilio			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
683176014Sattilio			    line);
684178159Sattilio			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
685178159Sattilio			    line);
686177957Sattilio			TD_LOCKS_INC(curthread);
687177957Sattilio			TD_SLOCKS_INC(curthread);
688177957Sattilio			STACK_SAVE(lk);
689177957Sattilio		}
690177957Sattilio		break;
691177957Sattilio	case LK_UPGRADE:
692177957Sattilio		_lockmgr_assert(lk, KA_SLOCKED, file, line);
693194317Sattilio		v = lk->lk_lock;
694194317Sattilio		x = v & LK_ALL_WAITERS;
695194317Sattilio		v &= LK_EXCLUSIVE_SPINNERS;
696177957Sattilio
69744681Sjulian		/*
698177957Sattilio		 * Try to switch from one shared lock to an exclusive one.
699177957Sattilio		 * We need to preserve waiters flags during the operation.
70044681Sjulian		 */
701194317Sattilio		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
702177957Sattilio		    tid | x)) {
703177957Sattilio			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
704177957Sattilio			    line);
705178159Sattilio			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
706178159Sattilio			    LK_TRYWIT(flags), file, line);
707177957Sattilio			TD_SLOCKS_DEC(curthread);
70824269Speter			break;
70924269Speter		}
710177957Sattilio
71124269Speter		/*
712177957Sattilio		 * We have been unable to succeed in upgrading, so just
713177957Sattilio		 * give up the shared lock.
71424269Speter		 */
715182010Sjhb		wakeup_swapper |= wakeupshlk(lk, file, line);
71624269Speter
717177957Sattilio		/* FALLTHROUGH */
718177957Sattilio	case LK_EXCLUSIVE:
719178159Sattilio		if (LK_CAN_WITNESS(flags))
720178159Sattilio			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
721182914Sjhb			    LOP_EXCLUSIVE, file, line, ilk);
72224269Speter
72324269Speter		/*
724180798Skib		 * If curthread already holds the lock and this one is
725177957Sattilio		 * allowed to recurse, simply recurse on it.
72624269Speter		 */
727177957Sattilio		if (lockmgr_xlocked(lk)) {
728177957Sattilio			if ((flags & LK_CANRECURSE) == 0 &&
729193307Sattilio			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
730177957Sattilio
731177957Sattilio				/*
732177957Sattilio				 * If the lock is expected to not panic just
733177957Sattilio				 * give up and return.
734177957Sattilio				 */
735177957Sattilio				if (LK_TRYOP(flags)) {
736177957Sattilio					LOCK_LOG2(lk,
737177957Sattilio					    "%s: %p fails the try operation",
738177957Sattilio					    __func__, lk);
739177957Sattilio					error = EBUSY;
740177957Sattilio					break;
741177957Sattilio				}
742177957Sattilio				if (flags & LK_INTERLOCK)
743177957Sattilio					class->lc_unlock(ilk);
744177957Sattilio		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
745177957Sattilio				    __func__, iwmesg, file, line);
746177957Sattilio			}
747177957Sattilio			lk->lk_recurse++;
748177957Sattilio			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
749177957Sattilio			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
750177957Sattilio			    lk->lk_recurse, file, line);
751178159Sattilio			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
752178159Sattilio			    LK_TRYWIT(flags), file, line);
753177957Sattilio			TD_LOCKS_INC(curthread);
75424269Speter			break;
75524269Speter		}
756177957Sattilio
757177957Sattilio		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
758177957Sattilio		    tid)) {
759233628Sfabient#ifdef HWPMC_HOOKS
760233628Sfabient			PMC_SOFT_CALL( , , lock, failed);
761233628Sfabient#endif
762177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
763177957Sattilio			    &contested, &waittime);
764177957Sattilio
76524269Speter			/*
766177957Sattilio			 * If the lock is expected to not sleep just give up
767177957Sattilio			 * and return.
76824269Speter			 */
769177957Sattilio			if (LK_TRYOP(flags)) {
770177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
771177957Sattilio				    __func__, lk);
772177957Sattilio				error = EBUSY;
773177957Sattilio				break;
774177957Sattilio			}
77534194Sdyson
776194317Sattilio#ifdef ADAPTIVE_LOCKMGRS
777177957Sattilio			/*
778194317Sattilio			 * If the owner is running on another CPU, spin until
779194317Sattilio			 * the owner stops running or the state of the lock
780194317Sattilio			 * changes.
781194317Sattilio			 */
782194317Sattilio			x = lk->lk_lock;
783194317Sattilio			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
784194317Sattilio			    LK_HOLDER(x) != LK_KERNPROC) {
785194317Sattilio				owner = (struct thread *)LK_HOLDER(x);
786194317Sattilio				if (LOCK_LOG_TEST(&lk->lock_object, 0))
787194317Sattilio					CTR3(KTR_LOCK,
788194317Sattilio					    "%s: spinning on %p held by %p",
789194317Sattilio					    __func__, lk, owner);
790194317Sattilio
791194317Sattilio				/*
792194317Sattilio				 * If we are holding also an interlock drop it
793194317Sattilio				 * in order to avoid a deadlock if the lockmgr
794194317Sattilio				 * owner is adaptively spinning on the
795194317Sattilio				 * interlock itself.
796194317Sattilio				 */
797194317Sattilio				if (flags & LK_INTERLOCK) {
798194317Sattilio					class->lc_unlock(ilk);
799194317Sattilio					flags &= ~LK_INTERLOCK;
800194317Sattilio				}
801194317Sattilio				GIANT_SAVE();
802194317Sattilio				while (LK_HOLDER(lk->lk_lock) ==
803194317Sattilio				    (uintptr_t)owner && TD_IS_RUNNING(owner))
804194317Sattilio					cpu_spinwait();
805196772Sattilio				GIANT_RESTORE();
806196772Sattilio				continue;
807194317Sattilio			} else if (LK_CAN_ADAPT(lk, flags) &&
808194317Sattilio			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
809194317Sattilio			    spintries < alk_retries) {
810194317Sattilio				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
811194317Sattilio				    !atomic_cmpset_ptr(&lk->lk_lock, x,
812194317Sattilio				    x | LK_EXCLUSIVE_SPINNERS))
813194317Sattilio					continue;
814194317Sattilio				if (flags & LK_INTERLOCK) {
815194317Sattilio					class->lc_unlock(ilk);
816194317Sattilio					flags &= ~LK_INTERLOCK;
817194317Sattilio				}
818194317Sattilio				GIANT_SAVE();
819194317Sattilio				spintries++;
820194317Sattilio				for (i = 0; i < alk_loops; i++) {
821194317Sattilio					if (LOCK_LOG_TEST(&lk->lock_object, 0))
822194317Sattilio						CTR4(KTR_LOCK,
823194317Sattilio				    "%s: shared spinning on %p with %u and %u",
824194317Sattilio						    __func__, lk, spintries, i);
825194317Sattilio					if ((lk->lk_lock &
826194317Sattilio					    LK_EXCLUSIVE_SPINNERS) == 0)
827194317Sattilio						break;
828194317Sattilio					cpu_spinwait();
829194317Sattilio				}
830196772Sattilio				GIANT_RESTORE();
831194317Sattilio				if (i != alk_loops)
832194317Sattilio					continue;
833194317Sattilio			}
834194317Sattilio#endif
835194317Sattilio
836194317Sattilio			/*
837177957Sattilio			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
839177957Sattilio			 */
840177957Sattilio			sleepq_lock(&lk->lock_object);
841177957Sattilio			x = lk->lk_lock;
842177957Sattilio
843177957Sattilio			/*
844177957Sattilio			 * if the lock has been released while we spun on
845177957Sattilio			 * the sleepqueue chain lock just try again.
846177957Sattilio			 */
847177957Sattilio			if (x == LK_UNLOCKED) {
848177957Sattilio				sleepq_release(&lk->lock_object);
849177957Sattilio				continue;
850134365Skan			}
85124269Speter
852194317Sattilio#ifdef ADAPTIVE_LOCKMGRS
85324269Speter			/*
854194317Sattilio			 * The current lock owner might have started executing
855194317Sattilio			 * on another CPU (or the lock could have changed
856194317Sattilio			 * owner) while we were waiting on the turnstile
857194317Sattilio			 * chain lock.  If so, drop the turnstile lock and try
858194317Sattilio			 * again.
859194317Sattilio			 */
860194317Sattilio			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
861194317Sattilio			    LK_HOLDER(x) != LK_KERNPROC) {
862194317Sattilio				owner = (struct thread *)LK_HOLDER(x);
863194317Sattilio				if (TD_IS_RUNNING(owner)) {
864194317Sattilio					sleepq_release(&lk->lock_object);
865194317Sattilio					continue;
866194317Sattilio				}
867194317Sattilio			}
868194317Sattilio#endif
869194317Sattilio
870194317Sattilio			/*
871177957Sattilio			 * The lock can be in the state where there is a
872177957Sattilio			 * pending queue of waiters, but still no owner.
873177957Sattilio			 * This happens when the lock is contested and an
874177957Sattilio			 * owner is going to claim the lock.
875177957Sattilio			 * If curthread is the one successfully acquiring it
876177957Sattilio			 * claim lock ownership and return, preserving waiters
877177957Sattilio			 * flags.
87824269Speter			 */
879194317Sattilio			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
880194317Sattilio			if ((x & ~v) == LK_UNLOCKED) {
881194317Sattilio				v &= ~LK_EXCLUSIVE_SPINNERS;
882177957Sattilio				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
883177957Sattilio				    tid | v)) {
884177957Sattilio					sleepq_release(&lk->lock_object);
885177957Sattilio					LOCK_LOG2(lk,
886177957Sattilio					    "%s: %p claimed by a new writer",
887177957Sattilio					    __func__, lk);
888177957Sattilio					break;
889177957Sattilio				}
890177957Sattilio				sleepq_release(&lk->lock_object);
891177957Sattilio				continue;
892177957Sattilio			}
893177957Sattilio
894177957Sattilio			/*
895177957Sattilio			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
896177957Sattilio			 * fail, loop back and retry.
897177957Sattilio			 */
898177957Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
899177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
900177957Sattilio				    x | LK_EXCLUSIVE_WAITERS)) {
901177957Sattilio					sleepq_release(&lk->lock_object);
902177957Sattilio					continue;
903177957Sattilio				}
904177957Sattilio				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
905177957Sattilio				    __func__, lk);
906177957Sattilio			}
907177957Sattilio
908177957Sattilio			/*
909177957Sattilio			 * As far as we have been unable to acquire the
910177957Sattilio			 * exclusive lock and the exclusive waiters flag
911177957Sattilio			 * is set, we will sleep.
912177957Sattilio			 */
913177957Sattilio			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
914177957Sattilio			    SQ_EXCLUSIVE_QUEUE);
915177957Sattilio			flags &= ~LK_INTERLOCK;
916177957Sattilio			if (error) {
917177957Sattilio				LOCK_LOG3(lk,
918177957Sattilio				    "%s: interrupted sleep for %p with %d",
919177957Sattilio				    __func__, lk, error);
92048301Smckusick				break;
92148301Smckusick			}
922177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
923177957Sattilio			    __func__, lk);
92424269Speter		}
925177957Sattilio		if (error == 0) {
926177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
927177957Sattilio			    contested, waittime, file, line);
928177957Sattilio			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
929177957Sattilio			    lk->lk_recurse, file, line);
930178159Sattilio			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
931178159Sattilio			    LK_TRYWIT(flags), file, line);
932177957Sattilio			TD_LOCKS_INC(curthread);
933177957Sattilio			STACK_SAVE(lk);
934177957Sattilio		}
935177957Sattilio		break;
936177957Sattilio	case LK_DOWNGRADE:
937243900Sattilio		_lockmgr_assert(lk, KA_XLOCKED, file, line);
938178159Sattilio		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
939178159Sattilio		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
940243900Sattilio
941243900Sattilio		/*
942243900Sattilio		 * Panic if the lock is recursed.
943243900Sattilio		 */
944243900Sattilio		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
945243900Sattilio			if (flags & LK_INTERLOCK)
946243900Sattilio				class->lc_unlock(ilk);
947243900Sattilio			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
948243900Sattilio			    __func__, iwmesg, file, line);
949243900Sattilio		}
950178159Sattilio		TD_SLOCKS_INC(curthread);
951177957Sattilio
95224269Speter		/*
953177957Sattilio		 * In order to preserve waiters flags, just spin.
95424269Speter		 */
955177957Sattilio		for (;;) {
956194317Sattilio			x = lk->lk_lock;
957194317Sattilio			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
958194317Sattilio			x &= LK_ALL_WAITERS;
959177957Sattilio			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
960178159Sattilio			    LK_SHARERS_LOCK(1) | x))
961177957Sattilio				break;
962177957Sattilio			cpu_spinwait();
96324269Speter		}
96424269Speter		break;
965177957Sattilio	case LK_RELEASE:
966177957Sattilio		_lockmgr_assert(lk, KA_LOCKED, file, line);
967177957Sattilio		x = lk->lk_lock;
96824269Speter
969177957Sattilio		if ((x & LK_SHARE) == 0) {
970177957Sattilio
971177957Sattilio			/*
			 * As a first option, treat the lock as if it has
			 * no waiters.
974177957Sattilio			 * Fix-up the tid var if the lock has been disowned.
975177957Sattilio			 */
976177957Sattilio			if (LK_HOLDER(x) == LK_KERNPROC)
977177957Sattilio				tid = LK_KERNPROC;
978178159Sattilio			else {
979178159Sattilio				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
980178159Sattilio				    file, line);
981177957Sattilio				TD_LOCKS_DEC(curthread);
982178159Sattilio			}
983177957Sattilio			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
984177957Sattilio			    lk->lk_recurse, file, line);
985177957Sattilio
986177957Sattilio			/*
987177957Sattilio			 * The lock is held in exclusive mode.
988177957Sattilio			 * If the lock is recursed also, then unrecurse it.
989177957Sattilio			 */
990177957Sattilio			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
991177957Sattilio				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
992177957Sattilio				    lk);
993177957Sattilio				lk->lk_recurse--;
994177957Sattilio				break;
995176014Sattilio			}
996189788Sjeff			if (tid != LK_KERNPROC)
997189788Sjeff				lock_profile_release_lock(&lk->lock_object);
998177957Sattilio
999177957Sattilio			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
1000177957Sattilio			    LK_UNLOCKED))
1001177957Sattilio				break;
1002177957Sattilio
1003177957Sattilio			sleepq_lock(&lk->lock_object);
1004194317Sattilio			x = lk->lk_lock;
1005177957Sattilio			v = LK_UNLOCKED;
1006177957Sattilio
1007177957Sattilio			/*
1008177957Sattilio		 	 * If the lock has exclusive waiters, give them
1009177957Sattilio			 * preference in order to avoid deadlock with
1010177957Sattilio			 * shared runners up.
1011200447Sattilio			 * If interruptible sleeps left the exclusive queue
1012200447Sattilio			 * empty avoid a starvation for the threads sleeping
1013200447Sattilio			 * on the shared queue by giving them precedence
1014200447Sattilio			 * and cleaning up the exclusive waiters bit anyway.
1015201709Sattilio			 * Please note that lk_exslpfail count may be lying
1016201709Sattilio			 * about the real number of waiters with the
1017201709Sattilio			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps so
1019201710Sattilio			 * lk_exslpfail might be considered an 'upper limit'
1020201710Sattilio			 * bound, including the edge cases.
1021177957Sattilio			 */
1022194317Sattilio			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1023200447Sattilio			realexslp = sleepq_sleepcnt(&lk->lock_object,
1024200447Sattilio			    SQ_EXCLUSIVE_QUEUE);
1025200447Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1026200447Sattilio				if (lk->lk_exslpfail < realexslp) {
1027200447Sattilio					lk->lk_exslpfail = 0;
1028200447Sattilio					queue = SQ_EXCLUSIVE_QUEUE;
1029200447Sattilio					v |= (x & LK_SHARED_WAITERS);
1030200447Sattilio				} else {
1031200447Sattilio					lk->lk_exslpfail = 0;
1032200447Sattilio					LOCK_LOG2(lk,
1033200447Sattilio					"%s: %p has only LK_SLEEPFAIL sleepers",
1034200447Sattilio					    __func__, lk);
1035200447Sattilio					LOCK_LOG2(lk,
1036200447Sattilio			"%s: %p waking up threads on the exclusive queue",
1037200447Sattilio					    __func__, lk);
1038200447Sattilio					wakeup_swapper =
1039200447Sattilio					    sleepq_broadcast(&lk->lock_object,
1040200447Sattilio					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1041200447Sattilio					queue = SQ_SHARED_QUEUE;
1042200447Sattilio				}
1043177957Sattilio			} else {
1044201703Sattilio
1045201703Sattilio				/*
1046201703Sattilio				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1047201703Sattilio				 * on and using interruptible sleeps/timeout
			 * may have left spurious lk_exslpfail counts
1049201703Sattilio				 * on, so clean it up anyway.
1050201703Sattilio				 */
1051201703Sattilio				lk->lk_exslpfail = 0;
1052177957Sattilio				queue = SQ_SHARED_QUEUE;
105324269Speter			}
1054149723Sssouhlal
1055177957Sattilio			LOCK_LOG3(lk,
1056177957Sattilio			    "%s: %p waking up threads on the %s queue",
1057177957Sattilio			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1058177957Sattilio			    "exclusive");
1059177957Sattilio			atomic_store_rel_ptr(&lk->lk_lock, v);
1060200447Sattilio			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1061181334Sjhb			    SLEEPQ_LK, 0, queue);
1062177957Sattilio			sleepq_release(&lk->lock_object);
1063177957Sattilio			break;
1064177957Sattilio		} else
1065181334Sjhb			wakeup_swapper = wakeupshlk(lk, file, line);
106624269Speter		break;
1067177957Sattilio	case LK_DRAIN:
1068178159Sattilio		if (LK_CAN_WITNESS(flags))
1069178159Sattilio			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1070182914Sjhb			    LOP_EXCLUSIVE, file, line, ilk);
107124269Speter
107224269Speter		/*
1073180798Skib		 * Trying to drain a lock we already own will result in a
1074177957Sattilio		 * deadlock.
107524269Speter		 */
1076177957Sattilio		if (lockmgr_xlocked(lk)) {
1077177957Sattilio			if (flags & LK_INTERLOCK)
1078177957Sattilio				class->lc_unlock(ilk);
1079177957Sattilio			panic("%s: draining %s with the lock held @ %s:%d\n",
1080177957Sattilio			    __func__, iwmesg, file, line);
1081177957Sattilio		}
108228345Sdyson
1083177957Sattilio		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1084233628Sfabient#ifdef HWPMC_HOOKS
1085233628Sfabient			PMC_SOFT_CALL( , , lock, failed);
1086233628Sfabient#endif
1087177957Sattilio			lock_profile_obtain_lock_failed(&lk->lock_object,
1088177957Sattilio			    &contested, &waittime);
108924269Speter
1090177957Sattilio			/*
1091177957Sattilio			 * If the lock is expected to not sleep just give up
1092177957Sattilio			 * and return.
1093177957Sattilio			 */
1094177957Sattilio			if (LK_TRYOP(flags)) {
1095177957Sattilio				LOCK_LOG2(lk, "%s: %p fails the try operation",
1096177957Sattilio				    __func__, lk);
1097177957Sattilio				error = EBUSY;
1098177957Sattilio				break;
1099177957Sattilio			}
110024269Speter
1101177957Sattilio			/*
1102177957Sattilio			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
1104177957Sattilio			 */
1105177957Sattilio			sleepq_lock(&lk->lock_object);
1106177957Sattilio			x = lk->lk_lock;
110729653Sdyson
1108177957Sattilio			/*
1109177957Sattilio			 * if the lock has been released while we spun on
1110177957Sattilio			 * the sleepqueue chain lock just try again.
1111177957Sattilio			 */
1112177957Sattilio			if (x == LK_UNLOCKED) {
1113177957Sattilio				sleepq_release(&lk->lock_object);
1114177957Sattilio				continue;
1115177957Sattilio			}
1116176320Sattilio
1117194317Sattilio			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1118194317Sattilio			if ((x & ~v) == LK_UNLOCKED) {
1119194317Sattilio				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1120200447Sattilio
1121200447Sattilio				/*
1122200447Sattilio				 * If interruptible sleeps left the exclusive
1123200447Sattilio				 * queue empty avoid a starvation for the
1124200447Sattilio				 * threads sleeping on the shared queue by
1125200447Sattilio				 * giving them precedence and cleaning up the
1126200447Sattilio				 * exclusive waiters bit anyway.
1127201709Sattilio				 * Please note that lk_exslpfail count may be
1128201709Sattilio				 * lying about the real number of waiters with
1129201709Sattilio				 * the LK_SLEEPFAIL flag on because they may
				 * be used in conjunction with interruptible
1131201710Sattilio				 * sleeps so lk_exslpfail might be considered
1132201710Sattilio				 * an 'upper limit' bound, including the edge
1133201709Sattilio				 * cases.
1134200447Sattilio				 */
1135177957Sattilio				if (v & LK_EXCLUSIVE_WAITERS) {
1136177957Sattilio					queue = SQ_EXCLUSIVE_QUEUE;
1137177957Sattilio					v &= ~LK_EXCLUSIVE_WAITERS;
1138177957Sattilio				} else {
1139201703Sattilio
1140201703Sattilio					/*
1141201703Sattilio					 * Exclusive waiters sleeping with
1142201703Sattilio					 * LK_SLEEPFAIL on and using
1143201703Sattilio					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
1145201703Sattilio					 * counts on, so clean it up anyway.
1146201703Sattilio					 */
1147177957Sattilio					MPASS(v & LK_SHARED_WAITERS);
1148201703Sattilio					lk->lk_exslpfail = 0;
1149177957Sattilio					queue = SQ_SHARED_QUEUE;
1150177957Sattilio					v &= ~LK_SHARED_WAITERS;
1151177957Sattilio				}
1152200447Sattilio				if (queue == SQ_EXCLUSIVE_QUEUE) {
1153200447Sattilio					realexslp =
1154200447Sattilio					    sleepq_sleepcnt(&lk->lock_object,
1155200447Sattilio					    SQ_EXCLUSIVE_QUEUE);
1156200447Sattilio					if (lk->lk_exslpfail >= realexslp) {
1157200447Sattilio						lk->lk_exslpfail = 0;
1158200447Sattilio						queue = SQ_SHARED_QUEUE;
1159200447Sattilio						v &= ~LK_SHARED_WAITERS;
1160200447Sattilio						if (realexslp != 0) {
1161200447Sattilio							LOCK_LOG2(lk,
1162200447Sattilio					"%s: %p has only LK_SLEEPFAIL sleepers",
1163200447Sattilio							    __func__, lk);
1164200447Sattilio							LOCK_LOG2(lk,
1165200447Sattilio			"%s: %p waking up threads on the exclusive queue",
1166200447Sattilio							    __func__, lk);
1167200447Sattilio							wakeup_swapper =
1168200447Sattilio							    sleepq_broadcast(
1169200447Sattilio							    &lk->lock_object,
1170200447Sattilio							    SLEEPQ_LK, 0,
1171200447Sattilio							    SQ_EXCLUSIVE_QUEUE);
1172200447Sattilio						}
1173200447Sattilio					} else
1174200447Sattilio						lk->lk_exslpfail = 0;
1175200447Sattilio				}
1176177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1177177957Sattilio					sleepq_release(&lk->lock_object);
1178177957Sattilio					continue;
1179177957Sattilio				}
1180177957Sattilio				LOCK_LOG3(lk,
1181177957Sattilio				"%s: %p waking up all threads on the %s queue",
1182177957Sattilio				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1183177957Sattilio				    "shared" : "exclusive");
1184182010Sjhb				wakeup_swapper |= sleepq_broadcast(
1185181334Sjhb				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1186177957Sattilio
1187177957Sattilio				/*
1188177957Sattilio				 * If shared waiters have been woken up we need
1189177957Sattilio				 * to wait for one of them to acquire the lock
1190177957Sattilio				 * before to set the exclusive waiters in
1191177957Sattilio				 * order to avoid a deadlock.
1192177957Sattilio				 */
1193177957Sattilio				if (queue == SQ_SHARED_QUEUE) {
1194177957Sattilio					for (v = lk->lk_lock;
1195177957Sattilio					    (v & LK_SHARE) && !LK_SHARERS(v);
1196177957Sattilio					    v = lk->lk_lock)
1197177957Sattilio						cpu_spinwait();
1198177957Sattilio				}
1199177957Sattilio			}
1200177957Sattilio
1201177957Sattilio			/*
1202177957Sattilio			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1203177957Sattilio			 * fail, loop back and retry.
1204177957Sattilio			 */
1205177957Sattilio			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1206177957Sattilio				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1207177957Sattilio				    x | LK_EXCLUSIVE_WAITERS)) {
1208177957Sattilio					sleepq_release(&lk->lock_object);
1209177957Sattilio					continue;
1210177957Sattilio				}
1211177957Sattilio				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1212177957Sattilio				    __func__, lk);
1213177957Sattilio			}
1214177957Sattilio
1215177957Sattilio			/*
1216177957Sattilio			 * As far as we have been unable to acquire the
1217177957Sattilio			 * exclusive lock and the exclusive waiters flag
1218177957Sattilio			 * is set, we will sleep.
1219177957Sattilio			 */
1220177957Sattilio			if (flags & LK_INTERLOCK) {
1221177957Sattilio				class->lc_unlock(ilk);
1222177957Sattilio				flags &= ~LK_INTERLOCK;
1223177957Sattilio			}
1224178159Sattilio			GIANT_SAVE();
1225177957Sattilio			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1226177957Sattilio			    SQ_EXCLUSIVE_QUEUE);
1227177957Sattilio			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1228178159Sattilio			GIANT_RESTORE();
1229177957Sattilio			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1230177957Sattilio			    __func__, lk);
123129653Sdyson		}
1232177957Sattilio
1233177957Sattilio		if (error == 0) {
1234177957Sattilio			lock_profile_obtain_lock_success(&lk->lock_object,
1235177957Sattilio			    contested, waittime, file, line);
1236177957Sattilio			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1237177957Sattilio			    lk->lk_recurse, file, line);
1238178159Sattilio			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1239178159Sattilio			    LK_TRYWIT(flags), file, line);
1240177957Sattilio			TD_LOCKS_INC(curthread);
1241177957Sattilio			STACK_SAVE(lk);
1242177957Sattilio		}
1243177957Sattilio		break;
1244177957Sattilio	default:
1245177957Sattilio		if (flags & LK_INTERLOCK)
1246177957Sattilio			class->lc_unlock(ilk);
1247177957Sattilio		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
124829653Sdyson	}
1249177957Sattilio
1250177957Sattilio	if (flags & LK_INTERLOCK)
1251177957Sattilio		class->lc_unlock(ilk);
1252181334Sjhb	if (wakeup_swapper)
1253181334Sjhb		kick_proc0();
1254177957Sattilio
1255177957Sattilio	return (error);
125629653Sdyson}
125729653Sdyson
/*
 * Disown an exclusively held lockmgr lock: atomically hand ownership
 * over from curthread to the anonymous LK_KERNPROC owner, preserving
 * any waiters flags.  The lock must be held exclusively and must not
 * be recursed; a lock that is already disowned is left untouched.
 */
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	/* Treat the operation as a no-op once the scheduler is stopped. */
	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__,  file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;

	/*
	 * Drop lock profiling, witness and per-thread accounting state:
	 * from here on the lock is no longer attributed to curthread.
	 */
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
130066615Sjasone
1301175166Sattiliovoid
1302227588Spjdlockmgr_printinfo(const struct lock *lk)
1303175166Sattilio{
1304175166Sattilio	struct thread *td;
1305177957Sattilio	uintptr_t x;
1306175166Sattilio
1307177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
1308188244Sjhb		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1309177957Sattilio	else if (lk->lk_lock & LK_SHARE)
1310188244Sjhb		printf("lock type %s: SHARED (count %ju)\n",
1311177957Sattilio		    lk->lock_object.lo_name,
1312177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1313177957Sattilio	else {
1314177957Sattilio		td = lockmgr_xholder(lk);
1315232547Sivoras		printf("lock type %s: EXCL by thread %p "
1316232547Sivoras		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1317232547Sivoras		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1318177957Sattilio	}
1319175166Sattilio
1320177957Sattilio	x = lk->lk_lock;
1321177957Sattilio	if (x & LK_EXCLUSIVE_WAITERS)
1322177957Sattilio		printf(" with exclusive waiters pending\n");
1323177957Sattilio	if (x & LK_SHARED_WAITERS)
1324177957Sattilio		printf(" with shared waiters pending\n");
1325194317Sattilio	if (x & LK_EXCLUSIVE_SPINNERS)
1326194317Sattilio		printf(" with exclusive spinners pending\n");
1327177957Sattilio
1328177957Sattilio	STACK_PRINT(lk);
1329175166Sattilio}
1330175166Sattilio
133129653Sdysonint
1332227588Spjdlockstatus(const struct lock *lk)
133329653Sdyson{
1334177957Sattilio	uintptr_t v, x;
1335177957Sattilio	int ret;
133629653Sdyson
1337177957Sattilio	ret = LK_SHARED;
1338177957Sattilio	x = lk->lk_lock;
1339177957Sattilio	v = LK_HOLDER(x);
1340175635Sattilio
1341177957Sattilio	if ((x & LK_SHARE) == 0) {
1342177957Sattilio		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1343177957Sattilio			ret = LK_EXCLUSIVE;
134454444Seivind		else
1345177957Sattilio			ret = LK_EXCLOTHER;
1346177957Sattilio	} else if (x == LK_UNLOCKED)
1347177957Sattilio		ret = 0;
134829653Sdyson
1349177957Sattilio	return (ret);
135024269Speter}
1351161322Sjhb
1352176249Sattilio#ifdef INVARIANT_SUPPORT
1353219028Snetchild
1354219028SnetchildFEATURE(invariant_support,
1355219028Snetchild    "Support for modules compiled with INVARIANTS option");
1356219028Snetchild
1357176249Sattilio#ifndef INVARIANTS
1358177957Sattilio#undef	_lockmgr_assert
1359176249Sattilio#endif
1360176249Sattilio
/*
 * Verify that the lockmgr lock 'lk' matches the state requested by the
 * KA_* flags in 'what'; panic, quoting the caller's file and line, when
 * it does not.  All checks are skipped once the system has panicked.
 */
void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	/* Lock state is no longer reliable after a panic. */
	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH: a shared assertion implies a locked one. */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Workaround this skipping the check if the lock is held in
		 * exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		/* Recursion can only be checked for exclusive holds. */
		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		/* A disowned lock also satisfies the exclusive assertion. */
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
1431177957Sattilio#endif
1432176249Sattilio
1433161322Sjhb#ifdef DDB
1434161337Sjhbint
1435161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp)
1436161337Sjhb{
1437177957Sattilio	struct lock *lk;
1438161337Sjhb
1439177957Sattilio	lk = td->td_wchan;
1440161337Sjhb
1441177957Sattilio	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1442177957Sattilio		return (0);
1443177957Sattilio	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1444177957Sattilio	if (lk->lk_lock & LK_SHARE)
1445177957Sattilio		db_printf("SHARED (count %ju)\n",
1446177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1447177957Sattilio	else
1448177957Sattilio		db_printf("EXCL\n");
1449177957Sattilio	*ownerp = lockmgr_xholder(lk);
1450161337Sjhb
1451161337Sjhb	return (1);
1452161337Sjhb}
1453161337Sjhb
1454177957Sattiliostatic void
1455227588Spjddb_show_lockmgr(const struct lock_object *lock)
1456161322Sjhb{
1457161322Sjhb	struct thread *td;
1458227588Spjd	const struct lock *lk;
1459161322Sjhb
1460227588Spjd	lk = (const struct lock *)lock;
1461161322Sjhb
1462168070Sjhb	db_printf(" state: ");
1463177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
1464161322Sjhb		db_printf("UNLOCKED\n");
1465177957Sattilio	else if (lk->lk_lock & LK_SHARE)
1466177957Sattilio		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1467177957Sattilio	else {
1468177957Sattilio		td = lockmgr_xholder(lk);
1469177957Sattilio		if (td == (struct thread *)LK_KERNPROC)
1470177957Sattilio			db_printf("XLOCK: LK_KERNPROC\n");
1471177957Sattilio		else
1472177957Sattilio			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1473177957Sattilio			    td->td_tid, td->td_proc->p_pid,
1474177957Sattilio			    td->td_proc->p_comm);
1475177957Sattilio		if (lockmgr_recursed(lk))
1476177957Sattilio			db_printf(" recursed: %d\n", lk->lk_recurse);
1477177957Sattilio	}
1478177957Sattilio	db_printf(" waiters: ");
1479177957Sattilio	switch (lk->lk_lock & LK_ALL_WAITERS) {
1480177957Sattilio	case LK_SHARED_WAITERS:
1481177957Sattilio		db_printf("shared\n");
1482192022Strasz		break;
1483177957Sattilio	case LK_EXCLUSIVE_WAITERS:
1484177957Sattilio		db_printf("exclusive\n");
1485177957Sattilio		break;
1486177957Sattilio	case LK_ALL_WAITERS:
1487177957Sattilio		db_printf("shared and exclusive\n");
1488177957Sattilio		break;
1489177957Sattilio	default:
1490177957Sattilio		db_printf("none\n");
1491177957Sattilio	}
1492194317Sattilio	db_printf(" spinners: ");
1493194317Sattilio	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1494194317Sattilio		db_printf("exclusive\n");
1495194317Sattilio	else
1496194317Sattilio		db_printf("none\n");
1497161322Sjhb}
1498161322Sjhb#endif
1499