1/*-
2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice(s), this list of conditions and the following disclaimer as
10 *    the first lines of this file unmodified other than the possible
11 *    addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice(s), this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29#include "opt_adaptive_lockmgrs.h"
30#include "opt_ddb.h"
31#include "opt_kdtrace.h"
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 201709 2010-01-07 01:19:01Z attilio $");
35
36#include <sys/param.h>
37#include <sys/ktr.h>
38#include <sys/linker_set.h>
39#include <sys/lock.h>
40#include <sys/lock_profile.h>
41#include <sys/lockmgr.h>
42#include <sys/mutex.h>
43#include <sys/proc.h>
44#include <sys/sleepqueue.h>
45#ifdef DEBUG_LOCKS
46#include <sys/stack.h>
47#endif
48#include <sys/sysctl.h>
49#include <sys/systm.h>
50
51#include <machine/cpu.h>
52
53#ifdef DDB
54#include <ddb/ddb.h>
55#endif
56
57CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
58    (LK_ADAPTIVE | LK_NOSHARE));
59CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
60    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
61
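/*
 * Threads waiting for the lock are parked on the lock object's sleepqueue,
 * split across two queues: exclusive requests sleep on SQ_EXCLUSIVE_QUEUE
 * and shared requests on SQ_SHARED_QUEUE.
 */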
62#define	SQ_EXCLUSIVE_QUEUE	0
63#define	SQ_SHARED_QUEUE		1
64
65#ifndef INVARIANTS
66#define	_lockmgr_assert(lk, what, file, line)
67#define	TD_LOCKS_INC(td)
68#define	TD_LOCKS_DEC(td)
69#else
70#define	TD_LOCKS_INC(td)	((td)->td_locks++)
71#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
72#endif
73#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
74#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
75
76#ifndef DEBUG_LOCKS
77#define	STACK_PRINT(lk)
78#define	STACK_SAVE(lk)
79#define	STACK_ZERO(lk)
80#else
81#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
82#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
83#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
84#endif
85
86#define	LOCK_LOG2(lk, string, arg1, arg2)				\
87	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
88		CTR2(KTR_LOCK, (string), (arg1), (arg2))
89#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
90	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
91		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
92
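/*
 * Giant handling: GIANT_SAVE() drops Giant completely, recording in '_i' how
 * many times it was recursively held, and GIANT_RESTORE() reacquires it that
 * many times.  This keeps Giant from being held across a sleep or an
 * adaptive spin.
 */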
93#define	GIANT_DECLARE							\
94	int _i = 0;							\
95	WITNESS_SAVE_DECL(Giant)
96#define	GIANT_RESTORE() do {						\
97	if (_i > 0) {							\
98		while (_i--)						\
99			mtx_lock(&Giant);				\
100		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
101	}								\
102} while (0)
103#define	GIANT_SAVE() do {						\
104	if (mtx_owned(&Giant)) {					\
105		WITNESS_SAVE(&Giant.lock_object, Giant);		\
106		while (mtx_owned(&Giant)) {				\
107			_i++;						\
108			mtx_unlock(&Giant);				\
109		}							\
110	}								\
111} while (0)
112
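/*
 * A shared request can be granted only while the lock is in shared state
 * and either no exclusive waiter or no exclusive spinner is pending, unless
 * the requesting thread is exempted from the writer-starvation avoidance
 * because it already holds shared lockmgr locks or has TDP_DEADLKTREAT set.
 */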
113#define	LK_CAN_SHARE(x)							\
114	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
115	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
116	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
117#define	LK_TRYOP(x)							\
118	((x) & LK_NOWAIT)
119
120#define	LK_CAN_WITNESS(x)						\
121	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
122#define	LK_TRYWIT(x)							\
123	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
124
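/*
 * Adaptive spinning is only attempted if the lock was initialized with
 * LK_ADAPTIVE and the request does not carry LK_SLEEPFAIL.
 */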
125#define	LK_CAN_ADAPT(lk, f)						\
126	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
127	((f) & LK_SLEEPFAIL) == 0)
128
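/*
 * Both predicates below mask out every flag bit except LK_SHARE before
 * comparing the lock word: a disowned lock is held on behalf of the kernel
 * (LK_KERNPROC), while lockmgr_xlocked() checks whether curthread is the
 * exclusive owner.
 */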
129#define	lockmgr_disowned(lk)						\
130	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
131
132#define	lockmgr_xlocked(lk)						\
133	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
134
135static void	 assert_lockmgr(struct lock_object *lock, int how);
136#ifdef DDB
137static void	 db_show_lockmgr(struct lock_object *lock);
138#endif
139static void	 lock_lockmgr(struct lock_object *lock, int how);
140#ifdef KDTRACE_HOOKS
141static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
142#endif
143static int	 unlock_lockmgr(struct lock_object *lock);
144
145struct lock_class lock_class_lockmgr = {
146	.lc_name = "lockmgr",
147	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
148	.lc_assert = assert_lockmgr,
149#ifdef DDB
150	.lc_ddb_show = db_show_lockmgr,
151#endif
152	.lc_lock = lock_lockmgr,
153	.lc_unlock = unlock_lockmgr,
154#ifdef KDTRACE_HOOKS
155	.lc_owner = owner_lockmgr,
156#endif
157};
158
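/*
 * Adaptive spinning tunables: alk_retries bounds the number of spinning
 * rounds attempted before blocking and alk_loops bounds the iterations per
 * round; both may be adjusted at run time via the debug.lockmgr sysctl tree.
 */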
159#ifdef ADAPTIVE_LOCKMGRS
160static u_int alk_retries = 10;
161static u_int alk_loops = 10000;
162SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
163SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
164SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
165#endif
166
167static __inline struct thread *
168lockmgr_xholder(struct lock *lk)
169{
170	uintptr_t x;
171
172	x = lk->lk_lock;
173	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
174}
175
/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes that the generic interlock is
 * sane and has already been checked by the caller.  If LK_INTERLOCK is
 * specified, the interlock is not reacquired after the sleep.
 */
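/*
 * The return value is 0 after a regular wakeup, the error reported by an
 * interrupted or timed-out sleep, or ENOLCK when LK_SLEEPFAIL was requested
 * and the sleep itself completed without error.
 */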
182static __inline int
183sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
184    const char *wmesg, int pri, int timo, int queue)
185{
186	GIANT_DECLARE;
187	struct lock_class *class;
188	int catch, error;
189
190	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
191	catch = pri & PCATCH;
192	pri &= PRIMASK;
193	error = 0;
194
195	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
196	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
197
198	if (flags & LK_INTERLOCK)
199		class->lc_unlock(ilk);
200	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
201		lk->lk_exslpfail++;
202	GIANT_SAVE();
203	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
204	    SLEEPQ_INTERRUPTIBLE : 0), queue);
205	if ((flags & LK_TIMELOCK) && timo)
206		sleepq_set_timeout(&lk->lock_object, timo);
207
	/*
	 * Decide which sleep primitive is appropriate.
	 */
211	if ((flags & LK_TIMELOCK) && timo && catch)
212		error = sleepq_timedwait_sig(&lk->lock_object, pri);
213	else if ((flags & LK_TIMELOCK) && timo)
214		error = sleepq_timedwait(&lk->lock_object, pri);
215	else if (catch)
216		error = sleepq_wait_sig(&lk->lock_object, pri);
217	else
218		sleepq_wait(&lk->lock_object, pri);
219	GIANT_RESTORE();
220	if ((flags & LK_SLEEPFAIL) && error == 0)
221		error = ENOLCK;
222
223	return (error);
224}
225
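/*
 * Release one shared hold of the lock and, when the last sharer goes away,
 * pass the lock to the sleeping waiters.  The return value reports whether
 * the swapper must be woken up; callers invoke kick_proc0() only after any
 * interlock has been dropped.
 */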
226static __inline int
227wakeupshlk(struct lock *lk, const char *file, int line)
228{
229	uintptr_t v, x;
230	u_int realexslp;
231	int queue, wakeup_swapper;
232
233	TD_LOCKS_DEC(curthread);
234	TD_SLOCKS_DEC(curthread);
235	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
236	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
237
238	wakeup_swapper = 0;
239	for (;;) {
240		x = lk->lk_lock;
241
242		/*
243		 * If there is more than one shared lock held, just drop one
244		 * and return.
245		 */
246		if (LK_SHARERS(x) > 1) {
247			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
248			    x - LK_ONE_SHARER))
249				break;
250			continue;
251		}
252
		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
257		if ((x & LK_ALL_WAITERS) == 0) {
258			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
259			    LK_SHARERS_LOCK(1));
260			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
261				break;
262			continue;
263		}
264
265		/*
266		 * We should have a sharer with waiters, so enter the hard
267		 * path in order to handle wakeups correctly.
268		 */
269		sleepq_lock(&lk->lock_object);
270		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
271		v = LK_UNLOCKED;
272
		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid a deadlock with the shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and clearing the exclusive
		 * waiters bit anyway.
		 * Note that the lk_exslpfail count may not reflect the real
		 * number of waiters with the LK_SLEEPFAIL flag set, because
		 * such waiters may also be using interruptible sleeps, so
		 * lk_exslpfail must be treated as an upper bound, given the
		 * edge cases.
		 */
286		realexslp = sleepq_sleepcnt(&lk->lock_object,
287		    SQ_EXCLUSIVE_QUEUE);
288		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
289			if (lk->lk_exslpfail < realexslp) {
290				lk->lk_exslpfail = 0;
291				queue = SQ_EXCLUSIVE_QUEUE;
292				v |= (x & LK_SHARED_WAITERS);
293			} else {
294				lk->lk_exslpfail = 0;
295				LOCK_LOG2(lk,
296				    "%s: %p has only LK_SLEEPFAIL sleepers",
297				    __func__, lk);
298				LOCK_LOG2(lk,
299			    "%s: %p waking up threads on the exclusive queue",
300				    __func__, lk);
301				wakeup_swapper =
302				    sleepq_broadcast(&lk->lock_object,
303				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
304				queue = SQ_SHARED_QUEUE;
305			}
306
307		} else {
308
			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeouts may have
			 * left spurious lk_exslpfail counts behind, so clean
			 * them up anyway.
			 */
315			lk->lk_exslpfail = 0;
316			queue = SQ_SHARED_QUEUE;
317		}
318
319		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
320		    v)) {
321			sleepq_release(&lk->lock_object);
322			continue;
323		}
324		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
325		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
326		    "exclusive");
327		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
328		    0, queue);
329		sleepq_release(&lk->lock_object);
330		break;
331	}
332
333	lock_profile_release_lock(&lk->lock_object);
334	return (wakeup_swapper);
335}
336
337static void
338assert_lockmgr(struct lock_object *lock, int what)
339{
340
341	panic("lockmgr locks do not support assertions");
342}
343
344static void
345lock_lockmgr(struct lock_object *lock, int how)
346{
347
348	panic("lockmgr locks do not support sleep interlocking");
349}
350
351static int
352unlock_lockmgr(struct lock_object *lock)
353{
354
355	panic("lockmgr locks do not support sleep interlocking");
356}
357
358#ifdef KDTRACE_HOOKS
359static int
360owner_lockmgr(struct lock_object *lock, struct thread **owner)
361{
362
	panic("lockmgr locks do not support owner queries");
364}
365#endif
366
367void
368lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
369{
370	int iflags;
371
372	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));
376
377	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
378	if (flags & LK_CANRECURSE)
379		iflags |= LO_RECURSABLE;
380	if ((flags & LK_NODUP) == 0)
381		iflags |= LO_DUPOK;
382	if (flags & LK_NOPROFILE)
383		iflags |= LO_NOPROFILE;
384	if ((flags & LK_NOWITNESS) == 0)
385		iflags |= LO_WITNESS;
386	if (flags & LK_QUIET)
387		iflags |= LO_QUIET;
388	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
389
390	lk->lk_lock = LK_UNLOCKED;
391	lk->lk_recurse = 0;
392	lk->lk_exslpfail = 0;
393	lk->lk_timo = timo;
394	lk->lk_pri = pri;
395	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
396	STACK_ZERO(lk);
397}
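
/*
 * Typical consumer usage, as a rough sketch (the lock name, wait message and
 * priority below are illustrative, not taken from this file):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolck", 0, 0);
 *	...
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	... exclusive critical section ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&foo_lock);
 */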
398
399void
400lockdestroy(struct lock *lk)
401{
402
403	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
404	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
405	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
406	lock_destroy(&lk->lock_object);
407}
408
409int
410__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
411    const char *wmesg, int pri, int timo, const char *file, int line)
412{
413	GIANT_DECLARE;
414	struct lock_class *class;
415	const char *iwmesg;
416	uintptr_t tid, v, x;
417	u_int op, realexslp;
418	int error, ipri, itimo, queue, wakeup_swapper;
419#ifdef LOCK_PROFILING
420	uint64_t waittime = 0;
421	int contested = 0;
422#endif
423#ifdef ADAPTIVE_LOCKMGRS
424	volatile struct thread *owner;
425	u_int i, spintries = 0;
426#endif
427
428	error = 0;
429	tid = (uintptr_t)curthread;
430	op = (flags & LK_TYPE_MASK);
431	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
432	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
433	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
434
435	MPASS((flags & ~LK_TOTAL_MASK) == 0);
436	KASSERT((op & (op - 1)) == 0,
437	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
442	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
443	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
444	    __func__, file, line));
445
446	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
447	if (panicstr != NULL) {
448		if (flags & LK_INTERLOCK)
449			class->lc_unlock(ilk);
450		return (0);
451	}
452
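	/*
	 * Locks initialized with LK_NOSHARE serve every request in exclusive
	 * mode, so silently promote shared requests.
	 */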
453	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
454		op = LK_EXCLUSIVE;
455
456	wakeup_swapper = 0;
457	switch (op) {
458	case LK_SHARED:
459		if (LK_CAN_WITNESS(flags))
460			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
461			    file, line, ilk);
462		for (;;) {
463			x = lk->lk_lock;
464
465			/*
466			 * If no other thread has an exclusive lock, or
467			 * no exclusive waiter is present, bump the count of
468			 * sharers.  Since we have to preserve the state of
469			 * waiters, if we fail to acquire the shared lock
470			 * loop back and retry.
471			 */
472			if (LK_CAN_SHARE(x)) {
473				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
474				    x + LK_ONE_SHARER))
475					break;
476				continue;
477			}
478			lock_profile_obtain_lock_failed(&lk->lock_object,
479			    &contested, &waittime);
480
			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
485			if (LK_HOLDER(x) == tid) {
486				LOCK_LOG2(lk,
487				    "%s: %p already held in exclusive mode",
488				    __func__, lk);
489				error = EDEADLK;
490				break;
491			}
492
			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
497			if (LK_TRYOP(flags)) {
498				LOCK_LOG2(lk, "%s: %p fails the try operation",
499				    __func__, lk);
500				error = EBUSY;
501				break;
502			}
503
504#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  Two states have to be handled here
			 * because, for a failed acquisition, the lock can be
			 * held either in exclusive mode or in shared mode
			 * (for the writer starvation avoidance technique).
			 */
513			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
514			    LK_HOLDER(x) != LK_KERNPROC) {
515				owner = (struct thread *)LK_HOLDER(x);
516				if (LOCK_LOG_TEST(&lk->lock_object, 0))
517					CTR3(KTR_LOCK,
518					    "%s: spinning on %p held by %p",
519					    __func__, lk, owner);
520
				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
527				if (flags & LK_INTERLOCK) {
528					class->lc_unlock(ilk);
529					flags &= ~LK_INTERLOCK;
530				}
531				GIANT_SAVE();
532				while (LK_HOLDER(lk->lk_lock) ==
533				    (uintptr_t)owner && TD_IS_RUNNING(owner))
534					cpu_spinwait();
535				GIANT_RESTORE();
536				continue;
537			} else if (LK_CAN_ADAPT(lk, flags) &&
538			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
539			    spintries < alk_retries) {
540				if (flags & LK_INTERLOCK) {
541					class->lc_unlock(ilk);
542					flags &= ~LK_INTERLOCK;
543				}
544				GIANT_SAVE();
545				spintries++;
546				for (i = 0; i < alk_loops; i++) {
547					if (LOCK_LOG_TEST(&lk->lock_object, 0))
548						CTR4(KTR_LOCK,
549				    "%s: shared spinning on %p with %u and %u",
550						    __func__, lk, spintries, i);
551					x = lk->lk_lock;
552					if ((x & LK_SHARE) == 0 ||
553					    LK_CAN_SHARE(x) != 0)
554						break;
555					cpu_spinwait();
556				}
557				GIANT_RESTORE();
558				if (i != alk_loops)
559					continue;
560			}
561#endif
562
			/*
			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			 */
567			sleepq_lock(&lk->lock_object);
568			x = lk->lk_lock;
569
570			/*
571			 * if the lock can be acquired in shared mode, try
572			 * again.
573			 */
574			if (LK_CAN_SHARE(x)) {
575				sleepq_release(&lk->lock_object);
576				continue;
577			}
578
579#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
587			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
588			    LK_HOLDER(x) != LK_KERNPROC) {
589				owner = (struct thread *)LK_HOLDER(x);
590				if (TD_IS_RUNNING(owner)) {
591					sleepq_release(&lk->lock_object);
592					continue;
593				}
594			}
595#endif
596
597			/*
598			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
599			 * loop back and retry.
600			 */
601			if ((x & LK_SHARED_WAITERS) == 0) {
602				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
603				    x | LK_SHARED_WAITERS)) {
604					sleepq_release(&lk->lock_object);
605					continue;
606				}
607				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
608				    __func__, lk);
609			}
610
			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
616			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
617			    SQ_SHARED_QUEUE);
618			flags &= ~LK_INTERLOCK;
619			if (error) {
620				LOCK_LOG3(lk,
621				    "%s: interrupted sleep for %p with %d",
622				    __func__, lk, error);
623				break;
624			}
625			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
626			    __func__, lk);
627		}
628		if (error == 0) {
629			lock_profile_obtain_lock_success(&lk->lock_object,
630			    contested, waittime, file, line);
631			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
632			    line);
633			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
634			    line);
635			TD_LOCKS_INC(curthread);
636			TD_SLOCKS_INC(curthread);
637			STACK_SAVE(lk);
638		}
639		break;
640	case LK_UPGRADE:
641		_lockmgr_assert(lk, KA_SLOCKED, file, line);
642		v = lk->lk_lock;
643		x = v & LK_ALL_WAITERS;
644		v &= LK_EXCLUSIVE_SPINNERS;
645
646		/*
647		 * Try to switch from one shared lock to an exclusive one.
648		 * We need to preserve waiters flags during the operation.
649		 */
650		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
651		    tid | x)) {
652			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
653			    line);
654			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
655			    LK_TRYWIT(flags), file, line);
656			TD_SLOCKS_DEC(curthread);
657			break;
658		}
659
		/*
		 * We were unable to upgrade, so just give up the shared
		 * lock.
		 */
664		wakeup_swapper |= wakeupshlk(lk, file, line);
665
666		/* FALLTHROUGH */
667	case LK_EXCLUSIVE:
668		if (LK_CAN_WITNESS(flags))
669			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
670			    LOP_EXCLUSIVE, file, line, ilk);
671
672		/*
673		 * If curthread already holds the lock and this one is
674		 * allowed to recurse, simply recurse on it.
675		 */
676		if (lockmgr_xlocked(lk)) {
677			if ((flags & LK_CANRECURSE) == 0 &&
678			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
679
				/*
				 * If this is a try operation, just give up
				 * and return instead of panicking.
				 */
684				if (LK_TRYOP(flags)) {
685					LOCK_LOG2(lk,
686					    "%s: %p fails the try operation",
687					    __func__, lk);
688					error = EBUSY;
689					break;
690				}
691				if (flags & LK_INTERLOCK)
692					class->lc_unlock(ilk);
693		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
694				    __func__, iwmesg, file, line);
695			}
696			lk->lk_recurse++;
697			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
698			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
699			    lk->lk_recurse, file, line);
700			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
701			    LK_TRYWIT(flags), file, line);
702			TD_LOCKS_INC(curthread);
703			break;
704		}
705
706		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
707		    tid)) {
708			lock_profile_obtain_lock_failed(&lk->lock_object,
709			    &contested, &waittime);
710
			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
715			if (LK_TRYOP(flags)) {
716				LOCK_LOG2(lk, "%s: %p fails the try operation",
717				    __func__, lk);
718				error = EBUSY;
719				break;
720			}
721
722#ifdef ADAPTIVE_LOCKMGRS
723			/*
724			 * If the owner is running on another CPU, spin until
725			 * the owner stops running or the state of the lock
726			 * changes.
727			 */
728			x = lk->lk_lock;
729			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
730			    LK_HOLDER(x) != LK_KERNPROC) {
731				owner = (struct thread *)LK_HOLDER(x);
732				if (LOCK_LOG_TEST(&lk->lock_object, 0))
733					CTR3(KTR_LOCK,
734					    "%s: spinning on %p held by %p",
735					    __func__, lk, owner);
736
				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
743				if (flags & LK_INTERLOCK) {
744					class->lc_unlock(ilk);
745					flags &= ~LK_INTERLOCK;
746				}
747				GIANT_SAVE();
748				while (LK_HOLDER(lk->lk_lock) ==
749				    (uintptr_t)owner && TD_IS_RUNNING(owner))
750					cpu_spinwait();
751				GIANT_RESTORE();
752				continue;
753			} else if (LK_CAN_ADAPT(lk, flags) &&
754			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
755			    spintries < alk_retries) {
756				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
757				    !atomic_cmpset_ptr(&lk->lk_lock, x,
758				    x | LK_EXCLUSIVE_SPINNERS))
759					continue;
760				if (flags & LK_INTERLOCK) {
761					class->lc_unlock(ilk);
762					flags &= ~LK_INTERLOCK;
763				}
764				GIANT_SAVE();
765				spintries++;
766				for (i = 0; i < alk_loops; i++) {
767					if (LOCK_LOG_TEST(&lk->lock_object, 0))
768						CTR4(KTR_LOCK,
769				    "%s: shared spinning on %p with %u and %u",
770						    __func__, lk, spintries, i);
771					if ((lk->lk_lock &
772					    LK_EXCLUSIVE_SPINNERS) == 0)
773						break;
774					cpu_spinwait();
775				}
776				GIANT_RESTORE();
777				if (i != alk_loops)
778					continue;
779			}
780#endif
781
			/*
			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			 */
786			sleepq_lock(&lk->lock_object);
787			x = lk->lk_lock;
788
789			/*
790			 * if the lock has been released while we spun on
791			 * the sleepqueue chain lock just try again.
792			 */
793			if (x == LK_UNLOCKED) {
794				sleepq_release(&lk->lock_object);
795				continue;
796			}
797
798#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
806			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
807			    LK_HOLDER(x) != LK_KERNPROC) {
808				owner = (struct thread *)LK_HOLDER(x);
809				if (TD_IS_RUNNING(owner)) {
810					sleepq_release(&lk->lock_object);
811					continue;
812				}
813			}
814#endif
815
			/*
			 * The lock can be in a state where there is a
			 * pending queue of waiters but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one that successfully acquires
			 * it, claim lock ownership and return, preserving the
			 * waiters flags.
			 */
825			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
826			if ((x & ~v) == LK_UNLOCKED) {
827				v &= ~LK_EXCLUSIVE_SPINNERS;
828				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
829				    tid | v)) {
830					sleepq_release(&lk->lock_object);
831					LOCK_LOG2(lk,
832					    "%s: %p claimed by a new writer",
833					    __func__, lk);
834					break;
835				}
836				sleepq_release(&lk->lock_object);
837				continue;
838			}
839
840			/*
841			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
842			 * fail, loop back and retry.
843			 */
844			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
845				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
846				    x | LK_EXCLUSIVE_WAITERS)) {
847					sleepq_release(&lk->lock_object);
848					continue;
849				}
850				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
851				    __func__, lk);
852			}
853
			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
859			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
860			    SQ_EXCLUSIVE_QUEUE);
861			flags &= ~LK_INTERLOCK;
862			if (error) {
863				LOCK_LOG3(lk,
864				    "%s: interrupted sleep for %p with %d",
865				    __func__, lk, error);
866				break;
867			}
868			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
869			    __func__, lk);
870		}
871		if (error == 0) {
872			lock_profile_obtain_lock_success(&lk->lock_object,
873			    contested, waittime, file, line);
874			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
875			    lk->lk_recurse, file, line);
876			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
877			    LK_TRYWIT(flags), file, line);
878			TD_LOCKS_INC(curthread);
879			STACK_SAVE(lk);
880		}
881		break;
882	case LK_DOWNGRADE:
883		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
884		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
885		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
886		TD_SLOCKS_INC(curthread);
887
888		/*
889		 * In order to preserve waiters flags, just spin.
890		 */
891		for (;;) {
892			x = lk->lk_lock;
893			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
894			x &= LK_ALL_WAITERS;
895			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
896			    LK_SHARERS_LOCK(1) | x))
897				break;
898			cpu_spinwait();
899		}
900		break;
901	case LK_RELEASE:
902		_lockmgr_assert(lk, KA_LOCKED, file, line);
903		x = lk->lk_lock;
904
905		if ((x & LK_SHARE) == 0) {
906
			/*
			 * As a first pass, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
912			if (LK_HOLDER(x) == LK_KERNPROC)
913				tid = LK_KERNPROC;
914			else {
915				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
916				    file, line);
917				TD_LOCKS_DEC(curthread);
918			}
919			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
920			    lk->lk_recurse, file, line);
921
			/*
			 * The lock is held in exclusive mode.
			 * If it is also recursed, just unrecurse it.
			 */
926			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
927				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
928				    lk);
929				lk->lk_recurse--;
930				break;
931			}
932			if (tid != LK_KERNPROC)
933				lock_profile_release_lock(&lk->lock_object);
934
935			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
936			    LK_UNLOCKED))
937				break;
938
939			sleepq_lock(&lk->lock_object);
940			x = lk->lk_lock;
941			v = LK_UNLOCKED;
942
			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid a deadlock with the
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping on
			 * the shared queue by giving them precedence and
			 * clearing the exclusive waiters bit anyway.
			 * Note that the lk_exslpfail count may not reflect
			 * the real number of waiters with the LK_SLEEPFAIL
			 * flag set, because such waiters may also be using
			 * interruptible sleeps, so lk_exslpfail must be
			 * treated as an upper bound, given the edge cases.
			 */
958			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
959			realexslp = sleepq_sleepcnt(&lk->lock_object,
960			    SQ_EXCLUSIVE_QUEUE);
961			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
962				if (lk->lk_exslpfail < realexslp) {
963					lk->lk_exslpfail = 0;
964					queue = SQ_EXCLUSIVE_QUEUE;
965					v |= (x & LK_SHARED_WAITERS);
966				} else {
967					lk->lk_exslpfail = 0;
968					LOCK_LOG2(lk,
969					"%s: %p has only LK_SLEEPFAIL sleepers",
970					    __func__, lk);
971					LOCK_LOG2(lk,
972			"%s: %p waking up threads on the exclusive queue",
973					    __func__, lk);
974					wakeup_swapper =
975					    sleepq_broadcast(&lk->lock_object,
976					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
977					queue = SQ_SHARED_QUEUE;
978				}
979			} else {
980
				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeouts
				 * may have left spurious lk_exslpfail counts
				 * behind, so clean them up anyway.
				 */
987				lk->lk_exslpfail = 0;
988				queue = SQ_SHARED_QUEUE;
989			}
990
991			LOCK_LOG3(lk,
992			    "%s: %p waking up threads on the %s queue",
993			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
994			    "exclusive");
995			atomic_store_rel_ptr(&lk->lk_lock, v);
996			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
997			    SLEEPQ_LK, 0, queue);
998			sleepq_release(&lk->lock_object);
999			break;
1000		} else
1001			wakeup_swapper = wakeupshlk(lk, file, line);
1002		break;
1003	case LK_DRAIN:
1004		if (LK_CAN_WITNESS(flags))
1005			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1006			    LOP_EXCLUSIVE, file, line, ilk);
1007
1008		/*
1009		 * Trying to drain a lock we already own will result in a
1010		 * deadlock.
1011		 */
1012		if (lockmgr_xlocked(lk)) {
1013			if (flags & LK_INTERLOCK)
1014				class->lc_unlock(ilk);
1015			panic("%s: draining %s with the lock held @ %s:%d\n",
1016			    __func__, iwmesg, file, line);
1017		}
1018
1019		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1020			lock_profile_obtain_lock_failed(&lk->lock_object,
1021			    &contested, &waittime);
1022
			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
1027			if (LK_TRYOP(flags)) {
1028				LOCK_LOG2(lk, "%s: %p fails the try operation",
1029				    __func__, lk);
1030				error = EBUSY;
1031				break;
1032			}
1033
			/*
			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			 */
1038			sleepq_lock(&lk->lock_object);
1039			x = lk->lk_lock;
1040
1041			/*
1042			 * if the lock has been released while we spun on
1043			 * the sleepqueue chain lock just try again.
1044			 */
1045			if (x == LK_UNLOCKED) {
1046				sleepq_release(&lk->lock_object);
1047				continue;
1048			}
1049
1050			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1051			if ((x & ~v) == LK_UNLOCKED) {
1052				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1053
				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and clearing the
				 * exclusive waiters bit anyway.
				 * Note that the lk_exslpfail count may not
				 * reflect the real number of waiters with
				 * the LK_SLEEPFAIL flag set, because such
				 * waiters may also be using interruptible
				 * sleeps, so lk_exslpfail must be treated as
				 * an upper bound, given the edge cases.
				 */
1068				if (v & LK_EXCLUSIVE_WAITERS) {
1069					queue = SQ_EXCLUSIVE_QUEUE;
1070					v &= ~LK_EXCLUSIVE_WAITERS;
1071				} else {
1072
					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeouts may
					 * have left spurious lk_exslpfail
					 * counts behind, so clean them up
					 * anyway.
					 */
1080					MPASS(v & LK_SHARED_WAITERS);
1081					lk->lk_exslpfail = 0;
1082					queue = SQ_SHARED_QUEUE;
1083					v &= ~LK_SHARED_WAITERS;
1084				}
1085				if (queue == SQ_EXCLUSIVE_QUEUE) {
1086					realexslp =
1087					    sleepq_sleepcnt(&lk->lock_object,
1088					    SQ_EXCLUSIVE_QUEUE);
1089					if (lk->lk_exslpfail >= realexslp) {
1090						lk->lk_exslpfail = 0;
1091						queue = SQ_SHARED_QUEUE;
1092						v &= ~LK_SHARED_WAITERS;
1093						if (realexslp != 0) {
1094							LOCK_LOG2(lk,
1095					"%s: %p has only LK_SLEEPFAIL sleepers",
1096							    __func__, lk);
1097							LOCK_LOG2(lk,
1098			"%s: %p waking up threads on the exclusive queue",
1099							    __func__, lk);
1100							wakeup_swapper =
1101							    sleepq_broadcast(
1102							    &lk->lock_object,
1103							    SLEEPQ_LK, 0,
1104							    SQ_EXCLUSIVE_QUEUE);
1105						}
1106					} else
1107						lk->lk_exslpfail = 0;
1108				}
1109				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1110					sleepq_release(&lk->lock_object);
1111					continue;
1112				}
1113				LOCK_LOG3(lk,
1114				"%s: %p waking up all threads on the %s queue",
1115				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1116				    "shared" : "exclusive");
1117				wakeup_swapper |= sleepq_broadcast(
1118				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1119
				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag, in order to avoid a deadlock.
				 */
1126				if (queue == SQ_SHARED_QUEUE) {
1127					for (v = lk->lk_lock;
1128					    (v & LK_SHARE) && !LK_SHARERS(v);
1129					    v = lk->lk_lock)
1130						cpu_spinwait();
1131				}
1132			}
1133
1134			/*
1135			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1136			 * fail, loop back and retry.
1137			 */
1138			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1139				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1140				    x | LK_EXCLUSIVE_WAITERS)) {
1141					sleepq_release(&lk->lock_object);
1142					continue;
1143				}
1144				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1145				    __func__, lk);
1146			}
1147
			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
1153			if (flags & LK_INTERLOCK) {
1154				class->lc_unlock(ilk);
1155				flags &= ~LK_INTERLOCK;
1156			}
1157			GIANT_SAVE();
1158			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1159			    SQ_EXCLUSIVE_QUEUE);
1160			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1161			GIANT_RESTORE();
1162			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1163			    __func__, lk);
1164		}
1165
1166		if (error == 0) {
1167			lock_profile_obtain_lock_success(&lk->lock_object,
1168			    contested, waittime, file, line);
1169			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1170			    lk->lk_recurse, file, line);
1171			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1172			    LK_TRYWIT(flags), file, line);
1173			TD_LOCKS_INC(curthread);
1174			STACK_SAVE(lk);
1175		}
1176		break;
1177	default:
1178		if (flags & LK_INTERLOCK)
1179			class->lc_unlock(ilk);
1180		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1181	}
1182
1183	if (flags & LK_INTERLOCK)
1184		class->lc_unlock(ilk);
1185	if (wakeup_swapper)
1186		kick_proc0();
1187
1188	return (error);
1189}
1190
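/*
 * Pass ownership of an exclusively held lock to the kernel (LK_KERNPROC),
 * so that it can later be released by a thread other than the original
 * owner.  The waiters flags are preserved while the owner field is
 * rewritten.
 */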
1191void
1192_lockmgr_disown(struct lock *lk, const char *file, int line)
1193{
1194	uintptr_t tid, x;
1195
1196	tid = (uintptr_t)curthread;
1197	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1198
1199	/*
1200	 * If the owner is already LK_KERNPROC just skip the whole operation.
1201	 */
1202	if (LK_HOLDER(lk->lk_lock) != tid)
1203		return;
1204	lock_profile_release_lock(&lk->lock_object);
1205	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1206	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1207	TD_LOCKS_DEC(curthread);
1208	STACK_SAVE(lk);
1209
1210	/*
1211	 * In order to preserve waiters flags, just spin.
1212	 */
1213	for (;;) {
1214		x = lk->lk_lock;
1215		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1216		x &= LK_ALL_WAITERS;
1217		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1218		    LK_KERNPROC | x))
1219			return;
1220		cpu_spinwait();
1221	}
1222}
1223
1224void
1225lockmgr_printinfo(struct lock *lk)
1226{
1227	struct thread *td;
1228	uintptr_t x;
1229
1230	if (lk->lk_lock == LK_UNLOCKED)
1231		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1232	else if (lk->lk_lock & LK_SHARE)
1233		printf("lock type %s: SHARED (count %ju)\n",
1234		    lk->lock_object.lo_name,
1235		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1236	else {
1237		td = lockmgr_xholder(lk);
1238		printf("lock type %s: EXCL by thread %p (pid %d)\n",
1239		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
1240	}
1241
1242	x = lk->lk_lock;
1243	if (x & LK_EXCLUSIVE_WAITERS)
1244		printf(" with exclusive waiters pending\n");
1245	if (x & LK_SHARED_WAITERS)
1246		printf(" with shared waiters pending\n");
1247	if (x & LK_EXCLUSIVE_SPINNERS)
1248		printf(" with exclusive spinners pending\n");
1249
1250	STACK_PRINT(lk);
1251}
1252
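/*
 * Report how the lock is currently held with respect to curthread:
 * LK_EXCLUSIVE if curthread (or the kernel, after a disown) owns it
 * exclusively, LK_EXCLOTHER if another thread does, LK_SHARED if it is
 * share-locked and 0 if it is unlocked.
 */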
1253int
1254lockstatus(struct lock *lk)
1255{
1256	uintptr_t v, x;
1257	int ret;
1258
1259	ret = LK_SHARED;
1260	x = lk->lk_lock;
1261	v = LK_HOLDER(x);
1262
1263	if ((x & LK_SHARE) == 0) {
1264		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1265			ret = LK_EXCLUSIVE;
1266		else
1267			ret = LK_EXCLOTHER;
1268	} else if (x == LK_UNLOCKED)
1269		ret = 0;
1270
1271	return (ret);
1272}
1273
1274#ifdef INVARIANT_SUPPORT
1275#ifndef INVARIANTS
1276#undef	_lockmgr_assert
1277#endif
1278
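/*
 * Assertion backend: 'what' is a combination of the KA_* flags describing
 * the state the lock is expected to be in; a mismatch results in a panic.
 */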
1279void
1280_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
1281{
1282	int slocked = 0;
1283
1284	if (panicstr != NULL)
1285		return;
1286	switch (what) {
1287	case KA_SLOCKED:
1288	case KA_SLOCKED | KA_NOTRECURSED:
1289	case KA_SLOCKED | KA_RECURSED:
1290		slocked = 1;
1291	case KA_LOCKED:
1292	case KA_LOCKED | KA_NOTRECURSED:
1293	case KA_LOCKED | KA_RECURSED:
1294#ifdef WITNESS
1295
		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		 */
1302		if (slocked || (lk->lk_lock & LK_SHARE)) {
1303			witness_assert(&lk->lock_object, what, file, line);
1304			break;
1305		}
1306#endif
1307		if (lk->lk_lock == LK_UNLOCKED ||
1308		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1309		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1310			panic("Lock %s not %slocked @ %s:%d\n",
1311			    lk->lock_object.lo_name, slocked ? "share" : "",
1312			    file, line);
1313
1314		if ((lk->lk_lock & LK_SHARE) == 0) {
1315			if (lockmgr_recursed(lk)) {
1316				if (what & KA_NOTRECURSED)
1317					panic("Lock %s recursed @ %s:%d\n",
1318					    lk->lock_object.lo_name, file,
1319					    line);
1320			} else if (what & KA_RECURSED)
1321				panic("Lock %s not recursed @ %s:%d\n",
1322				    lk->lock_object.lo_name, file, line);
1323		}
1324		break;
1325	case KA_XLOCKED:
1326	case KA_XLOCKED | KA_NOTRECURSED:
1327	case KA_XLOCKED | KA_RECURSED:
1328		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1329			panic("Lock %s not exclusively locked @ %s:%d\n",
1330			    lk->lock_object.lo_name, file, line);
1331		if (lockmgr_recursed(lk)) {
1332			if (what & KA_NOTRECURSED)
1333				panic("Lock %s recursed @ %s:%d\n",
1334				    lk->lock_object.lo_name, file, line);
1335		} else if (what & KA_RECURSED)
1336			panic("Lock %s not recursed @ %s:%d\n",
1337			    lk->lock_object.lo_name, file, line);
1338		break;
1339	case KA_UNLOCKED:
1340		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1341			panic("Lock %s exclusively locked @ %s:%d\n",
1342			    lk->lock_object.lo_name, file, line);
1343		break;
1344	default:
1345		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1346		    line);
1347	}
1348}
1349#endif
1350
1351#ifdef DDB
1352int
1353lockmgr_chain(struct thread *td, struct thread **ownerp)
1354{
1355	struct lock *lk;
1356
1357	lk = td->td_wchan;
1358
1359	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1360		return (0);
1361	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1362	if (lk->lk_lock & LK_SHARE)
1363		db_printf("SHARED (count %ju)\n",
1364		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1365	else
1366		db_printf("EXCL\n");
1367	*ownerp = lockmgr_xholder(lk);
1368
1369	return (1);
1370}
1371
1372static void
1373db_show_lockmgr(struct lock_object *lock)
1374{
1375	struct thread *td;
1376	struct lock *lk;
1377
1378	lk = (struct lock *)lock;
1379
1380	db_printf(" state: ");
1381	if (lk->lk_lock == LK_UNLOCKED)
1382		db_printf("UNLOCKED\n");
1383	else if (lk->lk_lock & LK_SHARE)
1384		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1385	else {
1386		td = lockmgr_xholder(lk);
1387		if (td == (struct thread *)LK_KERNPROC)
1388			db_printf("XLOCK: LK_KERNPROC\n");
1389		else
1390			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1391			    td->td_tid, td->td_proc->p_pid,
1392			    td->td_proc->p_comm);
1393		if (lockmgr_recursed(lk))
1394			db_printf(" recursed: %d\n", lk->lk_recurse);
1395	}
1396	db_printf(" waiters: ");
1397	switch (lk->lk_lock & LK_ALL_WAITERS) {
1398	case LK_SHARED_WAITERS:
1399		db_printf("shared\n");
1400		break;
1401	case LK_EXCLUSIVE_WAITERS:
1402		db_printf("exclusive\n");
1403		break;
1404	case LK_ALL_WAITERS:
1405		db_printf("shared and exclusive\n");
1406		break;
1407	default:
1408		db_printf("none\n");
1409	}
1410	db_printf(" spinners: ");
1411	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1412		db_printf("exclusive\n");
1413	else
1414		db_printf("none\n");
1415}
1416#endif
1417