/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

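/*
 * The GIANT_*() macros below let a thread that holds Giant, possibly
 * recursively, release it completely before going to sleep and then
 * reacquire it, to the same recursion depth, once the thread resumes.
 */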
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

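/*
 * As the helper macros above encode, the whole lock state lives in the
 * lk_lock word: with LK_SHARE set it carries the count of shared
 * holders, otherwise it carries the owning thread pointer (or
 * LK_KERNPROC for a disowned lock), while the bits covered by
 * LK_FLAGMASK record waiters and spinners.
 */
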
static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

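/*
 * When the kernel is built with ADAPTIVE_LOCKMGRS, a thread that fails
 * to acquire an adaptive lock spins as long as the owner keeps running,
 * retrying the shared-spin fallback up to alk_retries times with
 * alk_loops iterations each; the debug.lockmgr.retries and
 * debug.lockmgr.loops sysctls tune these limits.
 */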
#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes that the generic interlock
 * is sane and has already been checked by the caller.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Choose how to sleep based on the timeout and on whether
	 * signals should interrupt the sleep.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

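/*
 * Drop one shared lock reference and, when the last sharer is leaving,
 * wake up the appropriate sleep queue.  Returns nonzero if the caller
 * must wake up the swapper (proc0).
 */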
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may not reflect
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may also be using interruptible sleeps, so
		 * lk_exslpfail should be considered an upper bound,
		 * including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeouts may have
			 * left spurious lk_exslpfail counts on, so clean
			 * them up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

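/*
 * Initialize a lockmgr lock.  The pri and timo arguments become the
 * defaults applied whenever a lock request passes LK_PRIO_DEFAULT or
 * LK_TIMO_DEFAULT; flags select recursion, sharing, witness and
 * profiling behaviour.
 */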
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
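
/*
 * A minimal usage sketch; the "foo" structure, its f_lock member and
 * the PRIBIO priority are purely illustrative, and lockmgr() is the
 * usual wrapper macro around __lockmgr_args():
 *
 *	lockinit(&foo->f_lock, PRIBIO, "foolk", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 *	lockdestroy(&foo->f_lock);
 */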

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock,
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in an
			 * exclusive way, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle two states here
			 * because, after a failed acquisition, the lock can
			 * be held either in exclusive mode or in shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * In LK_TRYUPGRADE mode, do not drop the lock,
		 * returning EBUSY instead.
		 */
		if (op == LK_TRYUPGRADE) {
			LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * We have failed to upgrade, so just give up the shared
		 * lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If this is a try operation, just give up
				 * and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we
			 * will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first attempt, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may not
			 * reflect the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may also be
			 * using interruptible sleeps, so lk_exslpfail
			 * should be considered an upper bound, including
			 * the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeouts
				 * may have left spurious lk_exslpfail counts
				 * on, so clean them up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count
				 * may not reflect the real number of waiters
				 * with the LK_SLEEPFAIL flag on because they
				 * may also be using interruptible sleeps, so
				 * lk_exslpfail should be considered an upper
				 * bound, including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeouts may
					 * have left spurious lk_exslpfail
					 * counts on, so clean them up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we
			 * will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

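/*
 * Hand ownership of an exclusively held lock over to LK_KERNPROC, so
 * that any thread may release it later.  Waiters flags are preserved.
 */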
128629653Sdysonvoid
1287177957Sattilio_lockmgr_disown(struct lock *lk, const char *file, int line)
128829653Sdyson{
1289177957Sattilio	uintptr_t tid, x;
1290176014Sattilio
1291228424Savg	if (SCHEDULER_STOPPED())
1292228424Savg		return;
1293228424Savg
1294177957Sattilio	tid = (uintptr_t)curthread;
1295243900Sattilio	_lockmgr_assert(lk, KA_XLOCKED, file, line);
129629653Sdyson
1297177957Sattilio	/*
1298243900Sattilio	 * Panic if the lock is recursed.
1299243900Sattilio	 */
1300243900Sattilio	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1301243900Sattilio		panic("%s: disown a recursed lockmgr @ %s:%d\n",
1302243900Sattilio		    __func__,  file, line);
1303243900Sattilio
1304243900Sattilio	/*
1305180798Skib	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1306177957Sattilio	 */
1307177957Sattilio	if (LK_HOLDER(lk->lk_lock) != tid)
1308177957Sattilio		return;
1309189788Sjeff	lock_profile_release_lock(&lk->lock_object);
1310178159Sattilio	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1311178159Sattilio	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1312178159Sattilio	TD_LOCKS_DEC(curthread);
1313199008Sattilio	STACK_SAVE(lk);
131429653Sdyson
1315177957Sattilio	/*
1316177957Sattilio	 * In order to preserve the waiters flags, just spin.
1317177957Sattilio	 */
1318177957Sattilio	for (;;) {
1319194317Sattilio		x = lk->lk_lock;
1320194317Sattilio		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1321194317Sattilio		x &= LK_ALL_WAITERS;
1322178166Sattilio		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1323178159Sattilio		    LK_KERNPROC | x))
1324177957Sattilio			return;
1325177957Sattilio		cpu_spinwait();
1326177957Sattilio	}
132766615Sjasone}
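
/*
 * A minimal usage sketch (illustrative names, not from this file):
 * disowning hands an exclusively held lock over to LK_KERNPROC so that
 * the release may be performed by a different context, e.g. an I/O
 * completion handler.
 *
 *	lockmgr(&obj->o_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&obj->o_lock);
 *	(queue obj for asynchronous completion)
 *
 * The completing thread later performs:
 *
 *	lockmgr(&obj->o_lock, LK_RELEASE, NULL);
 */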
132866615Sjasone
1329175166Sattiliovoid
1330227588Spjdlockmgr_printinfo(const struct lock *lk)
1331175166Sattilio{
1332175166Sattilio	struct thread *td;
1333177957Sattilio	uintptr_t x;
1334175166Sattilio
1335177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
1336188244Sjhb		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1337177957Sattilio	else if (lk->lk_lock & LK_SHARE)
1338188244Sjhb		printf("lock type %s: SHARED (count %ju)\n",
1339177957Sattilio		    lk->lock_object.lo_name,
1340177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1341177957Sattilio	else {
1342177957Sattilio		td = lockmgr_xholder(lk);
1343232547Sivoras		printf("lock type %s: EXCL by thread %p "
1344232547Sivoras		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1345232547Sivoras		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1346177957Sattilio	}
1347175166Sattilio
1348177957Sattilio	x = lk->lk_lock;
1349177957Sattilio	if (x & LK_EXCLUSIVE_WAITERS)
1350177957Sattilio		printf(" with exclusive waiters pending\n");
1351177957Sattilio	if (x & LK_SHARED_WAITERS)
1352177957Sattilio		printf(" with shared waiters pending\n");
1353194317Sattilio	if (x & LK_EXCLUSIVE_SPINNERS)
1354194317Sattilio		printf(" with exclusive spinners pending\n");
1355177957Sattilio
1356177957Sattilio	STACK_PRINT(lk);
1357175166Sattilio}
1358175166Sattilio
135929653Sdysonint
1360227588Spjdlockstatus(const struct lock *lk)
136129653Sdyson{
1362177957Sattilio	uintptr_t v, x;
1363177957Sattilio	int ret;
136429653Sdyson
1365177957Sattilio	ret = LK_SHARED;
1366177957Sattilio	x = lk->lk_lock;
1367177957Sattilio	v = LK_HOLDER(x);
1368175635Sattilio
1369177957Sattilio	if ((x & LK_SHARE) == 0) {
1370177957Sattilio		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1371177957Sattilio			ret = LK_EXCLUSIVE;
137254444Seivind		else
1373177957Sattilio			ret = LK_EXCLOTHER;
1374177957Sattilio	} else if (x == LK_UNLOCKED)
1375177957Sattilio		ret = 0;
137629653Sdyson
1377177957Sattilio	return (ret);
137824269Speter}
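
/*
 * A minimal sketch of a lockstatus() consumer (illustrative): verify
 * that the current thread holds the lock exclusively before modifying
 * protected state.
 *
 *	if (lockstatus(&obj->o_lock) != LK_EXCLUSIVE)
 *		panic("%s: obj %p not exclusively locked", __func__, obj);
 */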
1379161322Sjhb
1380176249Sattilio#ifdef INVARIANT_SUPPORT
1381219028Snetchild
1382219028SnetchildFEATURE(invariant_support,
1383219028Snetchild    "Support for modules compiled with INVARIANTS option");
1384219028Snetchild
1385176249Sattilio#ifndef INVARIANTS
1386177957Sattilio#undef	_lockmgr_assert
1387176249Sattilio#endif
1388176249Sattilio
1389176249Sattiliovoid
1390227588Spjd_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1391176249Sattilio{
1392176249Sattilio	int slocked = 0;
1393176249Sattilio
1394176249Sattilio	if (panicstr != NULL)
1395176249Sattilio		return;
1396176249Sattilio	switch (what) {
1397176249Sattilio	case KA_SLOCKED:
1398176249Sattilio	case KA_SLOCKED | KA_NOTRECURSED:
1399176249Sattilio	case KA_SLOCKED | KA_RECURSED:
1400176249Sattilio		slocked = 1;
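		/* FALLTHROUGH */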
1401176249Sattilio	case KA_LOCKED:
1402176249Sattilio	case KA_LOCKED | KA_NOTRECURSED:
1403176249Sattilio	case KA_LOCKED | KA_RECURSED:
1404178159Sattilio#ifdef WITNESS
1405178159Sattilio
1406178159Sattilio		/*
1407178159Sattilio		 * We cannot trust WITNESS if the lock is held in exclusive
1408178159Sattilio		 * mode and a call to lockmgr_disown() happened.
1409178159Sattilio		 * Work around this by skipping the check if the lock is
1410178159Sattilio		 * held in exclusive mode, even for the KA_LOCKED case.
1411178159Sattilio		 */
1412178159Sattilio		if (slocked || (lk->lk_lock & LK_SHARE)) {
1413178159Sattilio			witness_assert(&lk->lock_object, what, file, line);
1414178159Sattilio			break;
1415178159Sattilio		}
1416178159Sattilio#endif
1417177957Sattilio		if (lk->lk_lock == LK_UNLOCKED ||
1418177957Sattilio		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1419177957Sattilio		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1420176249Sattilio			panic("Lock %s not %slocked @ %s:%d\n",
1421177957Sattilio			    lk->lock_object.lo_name, slocked ? "share" : "",
1422176249Sattilio			    file, line);
1423177957Sattilio
1424177957Sattilio		if ((lk->lk_lock & LK_SHARE) == 0) {
1425177957Sattilio			if (lockmgr_recursed(lk)) {
1426176249Sattilio				if (what & KA_NOTRECURSED)
1427176249Sattilio					panic("Lock %s recursed @ %s:%d\n",
1428177957Sattilio					    lk->lock_object.lo_name, file,
1429177957Sattilio					    line);
1430176249Sattilio			} else if (what & KA_RECURSED)
1431176249Sattilio				panic("Lock %s not recursed @ %s:%d\n",
1432177957Sattilio				    lk->lock_object.lo_name, file, line);
1433176249Sattilio		}
1434176249Sattilio		break;
1435176249Sattilio	case KA_XLOCKED:
1436176249Sattilio	case KA_XLOCKED | KA_NOTRECURSED:
1437176249Sattilio	case KA_XLOCKED | KA_RECURSED:
1438177957Sattilio		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1439176249Sattilio			panic("Lock %s not exclusively locked @ %s:%d\n",
1440177957Sattilio			    lk->lock_object.lo_name, file, line);
1441177957Sattilio		if (lockmgr_recursed(lk)) {
1442176249Sattilio			if (what & KA_NOTRECURSED)
1443176249Sattilio				panic("Lock %s recursed @ %s:%d\n",
1444177957Sattilio				    lk->lock_object.lo_name, file, line);
1445176249Sattilio		} else if (what & KA_RECURSED)
1446176249Sattilio			panic("Lock %s not recursed @ %s:%d\n",
1447177957Sattilio			    lk->lock_object.lo_name, file, line);
1448176249Sattilio		break;
1449176249Sattilio	case KA_UNLOCKED:
1450177957Sattilio		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1451176249Sattilio			panic("Lock %s exclusively locked @ %s:%d\n",
1452177957Sattilio			    lk->lock_object.lo_name, file, line);
1453176249Sattilio		break;
1454176249Sattilio	default:
1455177957Sattilio		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1456177957Sattilio		    line);
1457176249Sattilio	}
1458176249Sattilio}
1459177957Sattilio#endif
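
/*
 * A minimal sketch of the assertion interface (illustrative): under
 * INVARIANTS, the lockmgr_assert() macro from <sys/lockmgr.h> expands to
 * _lockmgr_assert() with the file and line information filled in.
 *
 *	lockmgr_assert(&obj->o_lock, KA_XLOCKED);
 *	lockmgr_assert(&obj->o_lock, KA_LOCKED | KA_NOTRECURSED);
 */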
1460176249Sattilio
1461161322Sjhb#ifdef DDB
1462161337Sjhbint
1463161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp)
1464161337Sjhb{
1465177957Sattilio	struct lock *lk;
1466161337Sjhb
1467177957Sattilio	lk = td->td_wchan;
1468161337Sjhb
1469177957Sattilio	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1470177957Sattilio		return (0);
1471177957Sattilio	db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1472177957Sattilio	if (lk->lk_lock & LK_SHARE)
1473177957Sattilio		db_printf("SHARED (count %ju)\n",
1474177957Sattilio		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1475177957Sattilio	else
1476177957Sattilio		db_printf("EXCL\n");
1477177957Sattilio	*ownerp = lockmgr_xholder(lk);
1478161337Sjhb
1479161337Sjhb	return (1);
1480161337Sjhb}
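
/*
 * lockmgr_chain() serves DDB's sleeping-lock chain walker (the
 * "show sleepchain" command), which uses the returned owner to follow
 * a chain of blocked threads across lock types.
 */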
1481161337Sjhb
1482177957Sattiliostatic void
1483227588Spjddb_show_lockmgr(const struct lock_object *lock)
1484161322Sjhb{
1485161322Sjhb	struct thread *td;
1486227588Spjd	const struct lock *lk;
1487161322Sjhb
1488227588Spjd	lk = (const struct lock *)lock;
1489161322Sjhb
1490168070Sjhb	db_printf(" state: ");
1491177957Sattilio	if (lk->lk_lock == LK_UNLOCKED)
1492161322Sjhb		db_printf("UNLOCKED\n");
1493177957Sattilio	else if (lk->lk_lock & LK_SHARE)
1494177957Sattilio		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1495177957Sattilio	else {
1496177957Sattilio		td = lockmgr_xholder(lk);
1497177957Sattilio		if (td == (struct thread *)LK_KERNPROC)
1498177957Sattilio			db_printf("XLOCK: LK_KERNPROC\n");
1499177957Sattilio		else
1500177957Sattilio			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1501177957Sattilio			    td->td_tid, td->td_proc->p_pid,
1502177957Sattilio			    td->td_proc->p_comm);
1503177957Sattilio		if (lockmgr_recursed(lk))
1504177957Sattilio			db_printf(" recursed: %d\n", lk->lk_recurse);
1505177957Sattilio	}
1506177957Sattilio	db_printf(" waiters: ");
1507177957Sattilio	switch (lk->lk_lock & LK_ALL_WAITERS) {
1508177957Sattilio	case LK_SHARED_WAITERS:
1509177957Sattilio		db_printf("shared\n");
1510192022Strasz		break;
1511177957Sattilio	case LK_EXCLUSIVE_WAITERS:
1512177957Sattilio		db_printf("exclusive\n");
1513177957Sattilio		break;
1514177957Sattilio	case LK_ALL_WAITERS:
1515177957Sattilio		db_printf("shared and exclusive\n");
1516177957Sattilio		break;
1517177957Sattilio	default:
1518177957Sattilio		db_printf("none\n");
1519177957Sattilio	}
1520194317Sattilio	db_printf(" spinners: ");
1521194317Sattilio	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1522194317Sattilio		db_printf("exclusive\n");
1523194317Sattilio	else
1524194317Sattilio		db_printf("none\n");
1525161322Sjhb}
1526161322Sjhb#endif
1527