kern_lock.c revision 232547
1/*-
2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice(s), this list of conditions and the following disclaimer as
10 *    the first lines of this file unmodified other than the possible
11 *    addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice(s), this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
27 */
28
29#include "opt_adaptive_lockmgrs.h"
30#include "opt_ddb.h"
31#include "opt_kdtrace.h"
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 232547 2012-03-05 14:19:43Z ivoras $");
35
36#include <sys/param.h>
37#include <sys/ktr.h>
38#include <sys/lock.h>
39#include <sys/lock_profile.h>
40#include <sys/lockmgr.h>
41#include <sys/mutex.h>
42#include <sys/proc.h>
43#include <sys/sleepqueue.h>
44#ifdef DEBUG_LOCKS
45#include <sys/stack.h>
46#endif
47#include <sys/sysctl.h>
48#include <sys/systm.h>
49
50#include <machine/cpu.h>
51
52#ifdef DDB
53#include <ddb/ddb.h>
54#endif
55
56CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
57    (LK_ADAPTIVE | LK_NOSHARE));
58CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
59    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
60
61#define	SQ_EXCLUSIVE_QUEUE	0
62#define	SQ_SHARED_QUEUE		1
63
64#ifndef INVARIANTS
65#define	_lockmgr_assert(lk, what, file, line)
66#define	TD_LOCKS_INC(td)
67#define	TD_LOCKS_DEC(td)
68#else
69#define	TD_LOCKS_INC(td)	((td)->td_locks++)
70#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
71#endif
72#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
73#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
74
75#ifndef DEBUG_LOCKS
76#define	STACK_PRINT(lk)
77#define	STACK_SAVE(lk)
78#define	STACK_ZERO(lk)
79#else
80#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
81#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
82#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
83#endif
84
85#define	LOCK_LOG2(lk, string, arg1, arg2)				\
86	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
87		CTR2(KTR_LOCK, (string), (arg1), (arg2))
88#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
89	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
90		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
91
92#define	GIANT_DECLARE							\
93	int _i = 0;							\
94	WITNESS_SAVE_DECL(Giant)
95#define	GIANT_RESTORE() do {						\
96	if (_i > 0) {							\
97		while (_i--)						\
98			mtx_lock(&Giant);				\
99		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
100	}								\
101} while (0)
102#define	GIANT_SAVE() do {						\
103	if (mtx_owned(&Giant)) {					\
104		WITNESS_SAVE(&Giant.lock_object, Giant);		\
105		while (mtx_owned(&Giant)) {				\
106			_i++;						\
107			mtx_unlock(&Giant);				\
108		}							\
109	}								\
110} while (0)
111
112#define	LK_CAN_SHARE(x)							\
113	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
114	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
115	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
116#define	LK_TRYOP(x)							\
117	((x) & LK_NOWAIT)
118
119#define	LK_CAN_WITNESS(x)						\
120	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
121#define	LK_TRYWIT(x)							\
122	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
123
124#define	LK_CAN_ADAPT(lk, f)						\
125	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
126	((f) & LK_SLEEPFAIL) == 0)
127
128#define	lockmgr_disowned(lk)						\
129	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
130
131#define	lockmgr_xlocked(lk)						\
132	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
133
134static void	assert_lockmgr(const struct lock_object *lock, int how);
135#ifdef DDB
136static void	db_show_lockmgr(const struct lock_object *lock);
137#endif
138static void	lock_lockmgr(struct lock_object *lock, int how);
139#ifdef KDTRACE_HOOKS
140static int	owner_lockmgr(const struct lock_object *lock,
141		    struct thread **owner);
142#endif
143static int	unlock_lockmgr(struct lock_object *lock);
144
145struct lock_class lock_class_lockmgr = {
146	.lc_name = "lockmgr",
147	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
148	.lc_assert = assert_lockmgr,
149#ifdef DDB
150	.lc_ddb_show = db_show_lockmgr,
151#endif
152	.lc_lock = lock_lockmgr,
153	.lc_unlock = unlock_lockmgr,
154#ifdef KDTRACE_HOOKS
155	.lc_owner = owner_lockmgr,
156#endif
157};
158
159#ifdef ADAPTIVE_LOCKMGRS
160static u_int alk_retries = 10;
161static u_int alk_loops = 10000;
162static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
163    "lockmgr debugging");
164SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
165SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
166#endif
167
168static __inline struct thread *
169lockmgr_xholder(const struct lock *lk)
170{
171	uintptr_t x;
172
173	x = lk->lk_lock;
174	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
175}
176
177/*
178 * It assumes sleepq_lock held and returns with this one unheld.
179 * It also assumes the generic interlock is sane and previously checked.
180 * If LK_INTERLOCK is specified the interlock is not reacquired after the
181 * sleep.
182 */
183static __inline int
184sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
185    const char *wmesg, int pri, int timo, int queue)
186{
187	GIANT_DECLARE;
188	struct lock_class *class;
189	int catch, error;
190
191	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
192	catch = pri & PCATCH;
193	pri &= PRIMASK;
194	error = 0;
195
196	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
197	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
198
199	if (flags & LK_INTERLOCK)
200		class->lc_unlock(ilk);
201	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
202		lk->lk_exslpfail++;
203	GIANT_SAVE();
204	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
205	    SLEEPQ_INTERRUPTIBLE : 0), queue);
206	if ((flags & LK_TIMELOCK) && timo)
207		sleepq_set_timeout(&lk->lock_object, timo);
208
209	/*
210	 * Decisional switch for real sleeping.
211	 */
212	if ((flags & LK_TIMELOCK) && timo && catch)
213		error = sleepq_timedwait_sig(&lk->lock_object, pri);
214	else if ((flags & LK_TIMELOCK) && timo)
215		error = sleepq_timedwait(&lk->lock_object, pri);
216	else if (catch)
217		error = sleepq_wait_sig(&lk->lock_object, pri);
218	else
219		sleepq_wait(&lk->lock_object, pri);
220	GIANT_RESTORE();
221	if ((flags & LK_SLEEPFAIL) && error == 0)
222		error = ENOLCK;
223
224	return (error);
225}
226
227static __inline int
228wakeupshlk(struct lock *lk, const char *file, int line)
229{
230	uintptr_t v, x;
231	u_int realexslp;
232	int queue, wakeup_swapper;
233
234	TD_LOCKS_DEC(curthread);
235	TD_SLOCKS_DEC(curthread);
236	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
237	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
238
239	wakeup_swapper = 0;
240	for (;;) {
241		x = lk->lk_lock;
242
243		/*
244		 * If there is more than one shared lock held, just drop one
245		 * and return.
246		 */
247		if (LK_SHARERS(x) > 1) {
248			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
249			    x - LK_ONE_SHARER))
250				break;
251			continue;
252		}
253
254		/*
255		 * If there are not waiters on the exclusive queue, drop the
256		 * lock quickly.
257		 */
258		if ((x & LK_ALL_WAITERS) == 0) {
259			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
260			    LK_SHARERS_LOCK(1));
261			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
262				break;
263			continue;
264		}
265
266		/*
267		 * We should have a sharer with waiters, so enter the hard
268		 * path in order to handle wakeups correctly.
269		 */
270		sleepq_lock(&lk->lock_object);
271		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
272		v = LK_UNLOCKED;
273
274		/*
275		 * If the lock has exclusive waiters, give them preference in
276		 * order to avoid deadlock with shared runners up.
277		 * If interruptible sleeps left the exclusive queue empty
278		 * avoid a starvation for the threads sleeping on the shared
279		 * queue by giving them precedence and cleaning up the
280		 * exclusive waiters bit anyway.
281		 * Please note that lk_exslpfail count may be lying about
282		 * the real number of waiters with the LK_SLEEPFAIL flag on
283		 * because they may be used in conjuction with interruptible
284		 * sleeps so lk_exslpfail might be considered an 'upper limit'
285		 * bound, including the edge cases.
286		 */
287		realexslp = sleepq_sleepcnt(&lk->lock_object,
288		    SQ_EXCLUSIVE_QUEUE);
289		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
290			if (lk->lk_exslpfail < realexslp) {
291				lk->lk_exslpfail = 0;
292				queue = SQ_EXCLUSIVE_QUEUE;
293				v |= (x & LK_SHARED_WAITERS);
294			} else {
295				lk->lk_exslpfail = 0;
296				LOCK_LOG2(lk,
297				    "%s: %p has only LK_SLEEPFAIL sleepers",
298				    __func__, lk);
299				LOCK_LOG2(lk,
300			    "%s: %p waking up threads on the exclusive queue",
301				    __func__, lk);
302				wakeup_swapper =
303				    sleepq_broadcast(&lk->lock_object,
304				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
305				queue = SQ_SHARED_QUEUE;
306			}
307
308		} else {
309
310			/*
311			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
312			 * and using interruptible sleeps/timeout may have
313			 * left spourious lk_exslpfail counts on, so clean
314			 * it up anyway.
315			 */
316			lk->lk_exslpfail = 0;
317			queue = SQ_SHARED_QUEUE;
318		}
319
320		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
321		    v)) {
322			sleepq_release(&lk->lock_object);
323			continue;
324		}
325		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
326		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
327		    "exclusive");
328		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
329		    0, queue);
330		sleepq_release(&lk->lock_object);
331		break;
332	}
333
334	lock_profile_release_lock(&lk->lock_object);
335	return (wakeup_swapper);
336}
337
338static void
339assert_lockmgr(const struct lock_object *lock, int what)
340{
341
342	panic("lockmgr locks do not support assertions");
343}
344
345static void
346lock_lockmgr(struct lock_object *lock, int how)
347{
348
349	panic("lockmgr locks do not support sleep interlocking");
350}
351
352static int
353unlock_lockmgr(struct lock_object *lock)
354{
355
356	panic("lockmgr locks do not support sleep interlocking");
357}
358
359#ifdef KDTRACE_HOOKS
360static int
361owner_lockmgr(const struct lock_object *lock, struct thread **owner)
362{
363
364	panic("lockmgr locks do not support owner inquiring");
365}
366#endif
367
368void
369lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
370{
371	int iflags;
372
373	MPASS((flags & ~LK_INIT_MASK) == 0);
374	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
375            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
376            &lk->lk_lock));
377
378	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
379	if (flags & LK_CANRECURSE)
380		iflags |= LO_RECURSABLE;
381	if ((flags & LK_NODUP) == 0)
382		iflags |= LO_DUPOK;
383	if (flags & LK_NOPROFILE)
384		iflags |= LO_NOPROFILE;
385	if ((flags & LK_NOWITNESS) == 0)
386		iflags |= LO_WITNESS;
387	if (flags & LK_QUIET)
388		iflags |= LO_QUIET;
389	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
390
391	lk->lk_lock = LK_UNLOCKED;
392	lk->lk_recurse = 0;
393	lk->lk_exslpfail = 0;
394	lk->lk_timo = timo;
395	lk->lk_pri = pri;
396	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
397	STACK_ZERO(lk);
398}
399
400/*
401 * XXX: Gross hacks to manipulate external lock flags after
402 * initialization.  Used for certain vnode and buf locks.
403 */
404void
405lockallowshare(struct lock *lk)
406{
407
408	lockmgr_assert(lk, KA_XLOCKED);
409	lk->lock_object.lo_flags &= ~LK_NOSHARE;
410}
411
412void
413lockallowrecurse(struct lock *lk)
414{
415
416	lockmgr_assert(lk, KA_XLOCKED);
417	lk->lock_object.lo_flags |= LO_RECURSABLE;
418}
419
420void
421lockdisablerecurse(struct lock *lk)
422{
423
424	lockmgr_assert(lk, KA_XLOCKED);
425	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
426}
427
428void
429lockdestroy(struct lock *lk)
430{
431
432	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
433	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
434	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
435	lock_destroy(&lk->lock_object);
436}
437
438int
439__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
440    const char *wmesg, int pri, int timo, const char *file, int line)
441{
442	GIANT_DECLARE;
443	struct lock_class *class;
444	const char *iwmesg;
445	uintptr_t tid, v, x;
446	u_int op, realexslp;
447	int error, ipri, itimo, queue, wakeup_swapper;
448#ifdef LOCK_PROFILING
449	uint64_t waittime = 0;
450	int contested = 0;
451#endif
452#ifdef ADAPTIVE_LOCKMGRS
453	volatile struct thread *owner;
454	u_int i, spintries = 0;
455#endif
456
457	error = 0;
458	tid = (uintptr_t)curthread;
459	op = (flags & LK_TYPE_MASK);
460	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
461	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
462	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
463
464	MPASS((flags & ~LK_TOTAL_MASK) == 0);
465	KASSERT((op & (op - 1)) == 0,
466	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
467	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
468	    (op != LK_DOWNGRADE && op != LK_RELEASE),
469	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
470	    __func__, file, line));
471	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
472	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
473	    __func__, file, line));
474
475	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
476	if (panicstr != NULL) {
477		if (flags & LK_INTERLOCK)
478			class->lc_unlock(ilk);
479		return (0);
480	}
481
482	if (lk->lock_object.lo_flags & LK_NOSHARE) {
483		switch (op) {
484		case LK_SHARED:
485			op = LK_EXCLUSIVE;
486			break;
487		case LK_UPGRADE:
488		case LK_DOWNGRADE:
489			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
490			    file, line);
491			return (0);
492		}
493	}
494
495	wakeup_swapper = 0;
496	switch (op) {
497	case LK_SHARED:
498		if (LK_CAN_WITNESS(flags))
499			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
500			    file, line, ilk);
501		for (;;) {
502			x = lk->lk_lock;
503
504			/*
505			 * If no other thread has an exclusive lock, or
506			 * no exclusive waiter is present, bump the count of
507			 * sharers.  Since we have to preserve the state of
508			 * waiters, if we fail to acquire the shared lock
509			 * loop back and retry.
510			 */
511			if (LK_CAN_SHARE(x)) {
512				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
513				    x + LK_ONE_SHARER))
514					break;
515				continue;
516			}
517			lock_profile_obtain_lock_failed(&lk->lock_object,
518			    &contested, &waittime);
519
520			/*
521			 * If the lock is already held by curthread in
522			 * exclusive way avoid a deadlock.
523			 */
524			if (LK_HOLDER(x) == tid) {
525				LOCK_LOG2(lk,
526				    "%s: %p already held in exclusive mode",
527				    __func__, lk);
528				error = EDEADLK;
529				break;
530			}
531
532			/*
533			 * If the lock is expected to not sleep just give up
534			 * and return.
535			 */
536			if (LK_TRYOP(flags)) {
537				LOCK_LOG2(lk, "%s: %p fails the try operation",
538				    __func__, lk);
539				error = EBUSY;
540				break;
541			}
542
543#ifdef ADAPTIVE_LOCKMGRS
544			/*
545			 * If the owner is running on another CPU, spin until
546			 * the owner stops running or the state of the lock
547			 * changes.  We need a double-state handle here
548			 * because for a failed acquisition the lock can be
549			 * either held in exclusive mode or shared mode
550			 * (for the writer starvation avoidance technique).
551			 */
552			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
553			    LK_HOLDER(x) != LK_KERNPROC) {
554				owner = (struct thread *)LK_HOLDER(x);
555				if (LOCK_LOG_TEST(&lk->lock_object, 0))
556					CTR3(KTR_LOCK,
557					    "%s: spinning on %p held by %p",
558					    __func__, lk, owner);
559
560				/*
561				 * If we are holding also an interlock drop it
562				 * in order to avoid a deadlock if the lockmgr
563				 * owner is adaptively spinning on the
564				 * interlock itself.
565				 */
566				if (flags & LK_INTERLOCK) {
567					class->lc_unlock(ilk);
568					flags &= ~LK_INTERLOCK;
569				}
570				GIANT_SAVE();
571				while (LK_HOLDER(lk->lk_lock) ==
572				    (uintptr_t)owner && TD_IS_RUNNING(owner))
573					cpu_spinwait();
574				GIANT_RESTORE();
575				continue;
576			} else if (LK_CAN_ADAPT(lk, flags) &&
577			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
578			    spintries < alk_retries) {
579				if (flags & LK_INTERLOCK) {
580					class->lc_unlock(ilk);
581					flags &= ~LK_INTERLOCK;
582				}
583				GIANT_SAVE();
584				spintries++;
585				for (i = 0; i < alk_loops; i++) {
586					if (LOCK_LOG_TEST(&lk->lock_object, 0))
587						CTR4(KTR_LOCK,
588				    "%s: shared spinning on %p with %u and %u",
589						    __func__, lk, spintries, i);
590					x = lk->lk_lock;
591					if ((x & LK_SHARE) == 0 ||
592					    LK_CAN_SHARE(x) != 0)
593						break;
594					cpu_spinwait();
595				}
596				GIANT_RESTORE();
597				if (i != alk_loops)
598					continue;
599			}
600#endif
601
602			/*
603			 * Acquire the sleepqueue chain lock because we
604			 * probabilly will need to manipulate waiters flags.
605			 */
606			sleepq_lock(&lk->lock_object);
607			x = lk->lk_lock;
608
609			/*
610			 * if the lock can be acquired in shared mode, try
611			 * again.
612			 */
613			if (LK_CAN_SHARE(x)) {
614				sleepq_release(&lk->lock_object);
615				continue;
616			}
617
618#ifdef ADAPTIVE_LOCKMGRS
619			/*
620			 * The current lock owner might have started executing
621			 * on another CPU (or the lock could have changed
622			 * owner) while we were waiting on the turnstile
623			 * chain lock.  If so, drop the turnstile lock and try
624			 * again.
625			 */
626			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
627			    LK_HOLDER(x) != LK_KERNPROC) {
628				owner = (struct thread *)LK_HOLDER(x);
629				if (TD_IS_RUNNING(owner)) {
630					sleepq_release(&lk->lock_object);
631					continue;
632				}
633			}
634#endif
635
636			/*
637			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
638			 * loop back and retry.
639			 */
640			if ((x & LK_SHARED_WAITERS) == 0) {
641				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
642				    x | LK_SHARED_WAITERS)) {
643					sleepq_release(&lk->lock_object);
644					continue;
645				}
646				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
647				    __func__, lk);
648			}
649
650			/*
651			 * As far as we have been unable to acquire the
652			 * shared lock and the shared waiters flag is set,
653			 * we will sleep.
654			 */
655			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
656			    SQ_SHARED_QUEUE);
657			flags &= ~LK_INTERLOCK;
658			if (error) {
659				LOCK_LOG3(lk,
660				    "%s: interrupted sleep for %p with %d",
661				    __func__, lk, error);
662				break;
663			}
664			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
665			    __func__, lk);
666		}
667		if (error == 0) {
668			lock_profile_obtain_lock_success(&lk->lock_object,
669			    contested, waittime, file, line);
670			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
671			    line);
672			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
673			    line);
674			TD_LOCKS_INC(curthread);
675			TD_SLOCKS_INC(curthread);
676			STACK_SAVE(lk);
677		}
678		break;
679	case LK_UPGRADE:
680		_lockmgr_assert(lk, KA_SLOCKED, file, line);
681		v = lk->lk_lock;
682		x = v & LK_ALL_WAITERS;
683		v &= LK_EXCLUSIVE_SPINNERS;
684
685		/*
686		 * Try to switch from one shared lock to an exclusive one.
687		 * We need to preserve waiters flags during the operation.
688		 */
689		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
690		    tid | x)) {
691			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
692			    line);
693			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
694			    LK_TRYWIT(flags), file, line);
695			TD_SLOCKS_DEC(curthread);
696			break;
697		}
698
699		/*
700		 * We have been unable to succeed in upgrading, so just
701		 * give up the shared lock.
702		 */
703		wakeup_swapper |= wakeupshlk(lk, file, line);
704
705		/* FALLTHROUGH */
706	case LK_EXCLUSIVE:
707		if (LK_CAN_WITNESS(flags))
708			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
709			    LOP_EXCLUSIVE, file, line, ilk);
710
711		/*
712		 * If curthread already holds the lock and this one is
713		 * allowed to recurse, simply recurse on it.
714		 */
715		if (lockmgr_xlocked(lk)) {
716			if ((flags & LK_CANRECURSE) == 0 &&
717			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
718
719				/*
720				 * If the lock is expected to not panic just
721				 * give up and return.
722				 */
723				if (LK_TRYOP(flags)) {
724					LOCK_LOG2(lk,
725					    "%s: %p fails the try operation",
726					    __func__, lk);
727					error = EBUSY;
728					break;
729				}
730				if (flags & LK_INTERLOCK)
731					class->lc_unlock(ilk);
732		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
733				    __func__, iwmesg, file, line);
734			}
735			lk->lk_recurse++;
736			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
737			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
738			    lk->lk_recurse, file, line);
739			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
740			    LK_TRYWIT(flags), file, line);
741			TD_LOCKS_INC(curthread);
742			break;
743		}
744
745		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
746		    tid)) {
747			lock_profile_obtain_lock_failed(&lk->lock_object,
748			    &contested, &waittime);
749
750			/*
751			 * If the lock is expected to not sleep just give up
752			 * and return.
753			 */
754			if (LK_TRYOP(flags)) {
755				LOCK_LOG2(lk, "%s: %p fails the try operation",
756				    __func__, lk);
757				error = EBUSY;
758				break;
759			}
760
761#ifdef ADAPTIVE_LOCKMGRS
762			/*
763			 * If the owner is running on another CPU, spin until
764			 * the owner stops running or the state of the lock
765			 * changes.
766			 */
767			x = lk->lk_lock;
768			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
769			    LK_HOLDER(x) != LK_KERNPROC) {
770				owner = (struct thread *)LK_HOLDER(x);
771				if (LOCK_LOG_TEST(&lk->lock_object, 0))
772					CTR3(KTR_LOCK,
773					    "%s: spinning on %p held by %p",
774					    __func__, lk, owner);
775
776				/*
777				 * If we are holding also an interlock drop it
778				 * in order to avoid a deadlock if the lockmgr
779				 * owner is adaptively spinning on the
780				 * interlock itself.
781				 */
782				if (flags & LK_INTERLOCK) {
783					class->lc_unlock(ilk);
784					flags &= ~LK_INTERLOCK;
785				}
786				GIANT_SAVE();
787				while (LK_HOLDER(lk->lk_lock) ==
788				    (uintptr_t)owner && TD_IS_RUNNING(owner))
789					cpu_spinwait();
790				GIANT_RESTORE();
791				continue;
792			} else if (LK_CAN_ADAPT(lk, flags) &&
793			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
794			    spintries < alk_retries) {
795				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
796				    !atomic_cmpset_ptr(&lk->lk_lock, x,
797				    x | LK_EXCLUSIVE_SPINNERS))
798					continue;
799				if (flags & LK_INTERLOCK) {
800					class->lc_unlock(ilk);
801					flags &= ~LK_INTERLOCK;
802				}
803				GIANT_SAVE();
804				spintries++;
805				for (i = 0; i < alk_loops; i++) {
806					if (LOCK_LOG_TEST(&lk->lock_object, 0))
807						CTR4(KTR_LOCK,
808				    "%s: shared spinning on %p with %u and %u",
809						    __func__, lk, spintries, i);
810					if ((lk->lk_lock &
811					    LK_EXCLUSIVE_SPINNERS) == 0)
812						break;
813					cpu_spinwait();
814				}
815				GIANT_RESTORE();
816				if (i != alk_loops)
817					continue;
818			}
819#endif
820
821			/*
822			 * Acquire the sleepqueue chain lock because we
823			 * probabilly will need to manipulate waiters flags.
824			 */
825			sleepq_lock(&lk->lock_object);
826			x = lk->lk_lock;
827
828			/*
829			 * if the lock has been released while we spun on
830			 * the sleepqueue chain lock just try again.
831			 */
832			if (x == LK_UNLOCKED) {
833				sleepq_release(&lk->lock_object);
834				continue;
835			}
836
837#ifdef ADAPTIVE_LOCKMGRS
838			/*
839			 * The current lock owner might have started executing
840			 * on another CPU (or the lock could have changed
841			 * owner) while we were waiting on the turnstile
842			 * chain lock.  If so, drop the turnstile lock and try
843			 * again.
844			 */
845			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
846			    LK_HOLDER(x) != LK_KERNPROC) {
847				owner = (struct thread *)LK_HOLDER(x);
848				if (TD_IS_RUNNING(owner)) {
849					sleepq_release(&lk->lock_object);
850					continue;
851				}
852			}
853#endif
854
855			/*
856			 * The lock can be in the state where there is a
857			 * pending queue of waiters, but still no owner.
858			 * This happens when the lock is contested and an
859			 * owner is going to claim the lock.
860			 * If curthread is the one successfully acquiring it
861			 * claim lock ownership and return, preserving waiters
862			 * flags.
863			 */
864			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
865			if ((x & ~v) == LK_UNLOCKED) {
866				v &= ~LK_EXCLUSIVE_SPINNERS;
867				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
868				    tid | v)) {
869					sleepq_release(&lk->lock_object);
870					LOCK_LOG2(lk,
871					    "%s: %p claimed by a new writer",
872					    __func__, lk);
873					break;
874				}
875				sleepq_release(&lk->lock_object);
876				continue;
877			}
878
879			/*
880			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
881			 * fail, loop back and retry.
882			 */
883			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
884				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
885				    x | LK_EXCLUSIVE_WAITERS)) {
886					sleepq_release(&lk->lock_object);
887					continue;
888				}
889				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
890				    __func__, lk);
891			}
892
893			/*
894			 * As far as we have been unable to acquire the
895			 * exclusive lock and the exclusive waiters flag
896			 * is set, we will sleep.
897			 */
898			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
899			    SQ_EXCLUSIVE_QUEUE);
900			flags &= ~LK_INTERLOCK;
901			if (error) {
902				LOCK_LOG3(lk,
903				    "%s: interrupted sleep for %p with %d",
904				    __func__, lk, error);
905				break;
906			}
907			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
908			    __func__, lk);
909		}
910		if (error == 0) {
911			lock_profile_obtain_lock_success(&lk->lock_object,
912			    contested, waittime, file, line);
913			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
914			    lk->lk_recurse, file, line);
915			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
916			    LK_TRYWIT(flags), file, line);
917			TD_LOCKS_INC(curthread);
918			STACK_SAVE(lk);
919		}
920		break;
921	case LK_DOWNGRADE:
922		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
923		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
924		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
925		TD_SLOCKS_INC(curthread);
926
927		/*
928		 * In order to preserve waiters flags, just spin.
929		 */
930		for (;;) {
931			x = lk->lk_lock;
932			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
933			x &= LK_ALL_WAITERS;
934			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
935			    LK_SHARERS_LOCK(1) | x))
936				break;
937			cpu_spinwait();
938		}
939		break;
940	case LK_RELEASE:
941		_lockmgr_assert(lk, KA_LOCKED, file, line);
942		x = lk->lk_lock;
943
944		if ((x & LK_SHARE) == 0) {
945
946			/*
947			 * As first option, treact the lock as if it has not
948			 * any waiter.
949			 * Fix-up the tid var if the lock has been disowned.
950			 */
951			if (LK_HOLDER(x) == LK_KERNPROC)
952				tid = LK_KERNPROC;
953			else {
954				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
955				    file, line);
956				TD_LOCKS_DEC(curthread);
957			}
958			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
959			    lk->lk_recurse, file, line);
960
961			/*
962			 * The lock is held in exclusive mode.
963			 * If the lock is recursed also, then unrecurse it.
964			 */
965			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
966				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
967				    lk);
968				lk->lk_recurse--;
969				break;
970			}
971			if (tid != LK_KERNPROC)
972				lock_profile_release_lock(&lk->lock_object);
973
974			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
975			    LK_UNLOCKED))
976				break;
977
978			sleepq_lock(&lk->lock_object);
979			x = lk->lk_lock;
980			v = LK_UNLOCKED;
981
982			/*
983		 	 * If the lock has exclusive waiters, give them
984			 * preference in order to avoid deadlock with
985			 * shared runners up.
986			 * If interruptible sleeps left the exclusive queue
987			 * empty avoid a starvation for the threads sleeping
988			 * on the shared queue by giving them precedence
989			 * and cleaning up the exclusive waiters bit anyway.
990			 * Please note that lk_exslpfail count may be lying
991			 * about the real number of waiters with the
992			 * LK_SLEEPFAIL flag on because they may be used in
993			 * conjuction with interruptible sleeps so
994			 * lk_exslpfail might be considered an 'upper limit'
995			 * bound, including the edge cases.
996			 */
997			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
998			realexslp = sleepq_sleepcnt(&lk->lock_object,
999			    SQ_EXCLUSIVE_QUEUE);
1000			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1001				if (lk->lk_exslpfail < realexslp) {
1002					lk->lk_exslpfail = 0;
1003					queue = SQ_EXCLUSIVE_QUEUE;
1004					v |= (x & LK_SHARED_WAITERS);
1005				} else {
1006					lk->lk_exslpfail = 0;
1007					LOCK_LOG2(lk,
1008					"%s: %p has only LK_SLEEPFAIL sleepers",
1009					    __func__, lk);
1010					LOCK_LOG2(lk,
1011			"%s: %p waking up threads on the exclusive queue",
1012					    __func__, lk);
1013					wakeup_swapper =
1014					    sleepq_broadcast(&lk->lock_object,
1015					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1016					queue = SQ_SHARED_QUEUE;
1017				}
1018			} else {
1019
1020				/*
1021				 * Exclusive waiters sleeping with LK_SLEEPFAIL
1022				 * on and using interruptible sleeps/timeout
1023				 * may have left spourious lk_exslpfail counts
1024				 * on, so clean it up anyway.
1025				 */
1026				lk->lk_exslpfail = 0;
1027				queue = SQ_SHARED_QUEUE;
1028			}
1029
1030			LOCK_LOG3(lk,
1031			    "%s: %p waking up threads on the %s queue",
1032			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1033			    "exclusive");
1034			atomic_store_rel_ptr(&lk->lk_lock, v);
1035			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1036			    SLEEPQ_LK, 0, queue);
1037			sleepq_release(&lk->lock_object);
1038			break;
1039		} else
1040			wakeup_swapper = wakeupshlk(lk, file, line);
1041		break;
1042	case LK_DRAIN:
1043		if (LK_CAN_WITNESS(flags))
1044			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1045			    LOP_EXCLUSIVE, file, line, ilk);
1046
1047		/*
1048		 * Trying to drain a lock we already own will result in a
1049		 * deadlock.
1050		 */
1051		if (lockmgr_xlocked(lk)) {
1052			if (flags & LK_INTERLOCK)
1053				class->lc_unlock(ilk);
1054			panic("%s: draining %s with the lock held @ %s:%d\n",
1055			    __func__, iwmesg, file, line);
1056		}
1057
1058		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1059			lock_profile_obtain_lock_failed(&lk->lock_object,
1060			    &contested, &waittime);
1061
1062			/*
1063			 * If the lock is expected to not sleep just give up
1064			 * and return.
1065			 */
1066			if (LK_TRYOP(flags)) {
1067				LOCK_LOG2(lk, "%s: %p fails the try operation",
1068				    __func__, lk);
1069				error = EBUSY;
1070				break;
1071			}
1072
1073			/*
1074			 * Acquire the sleepqueue chain lock because we
1075			 * probabilly will need to manipulate waiters flags.
1076			 */
1077			sleepq_lock(&lk->lock_object);
1078			x = lk->lk_lock;
1079
1080			/*
1081			 * if the lock has been released while we spun on
1082			 * the sleepqueue chain lock just try again.
1083			 */
1084			if (x == LK_UNLOCKED) {
1085				sleepq_release(&lk->lock_object);
1086				continue;
1087			}
1088
1089			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1090			if ((x & ~v) == LK_UNLOCKED) {
1091				v = (x & ~LK_EXCLUSIVE_SPINNERS);
1092
1093				/*
1094				 * If interruptible sleeps left the exclusive
1095				 * queue empty avoid a starvation for the
1096				 * threads sleeping on the shared queue by
1097				 * giving them precedence and cleaning up the
1098				 * exclusive waiters bit anyway.
1099				 * Please note that lk_exslpfail count may be
1100				 * lying about the real number of waiters with
1101				 * the LK_SLEEPFAIL flag on because they may
1102				 * be used in conjuction with interruptible
1103				 * sleeps so lk_exslpfail might be considered
1104				 * an 'upper limit' bound, including the edge
1105				 * cases.
1106				 */
1107				if (v & LK_EXCLUSIVE_WAITERS) {
1108					queue = SQ_EXCLUSIVE_QUEUE;
1109					v &= ~LK_EXCLUSIVE_WAITERS;
1110				} else {
1111
1112					/*
1113					 * Exclusive waiters sleeping with
1114					 * LK_SLEEPFAIL on and using
1115					 * interruptible sleeps/timeout may
1116					 * have left spourious lk_exslpfail
1117					 * counts on, so clean it up anyway.
1118					 */
1119					MPASS(v & LK_SHARED_WAITERS);
1120					lk->lk_exslpfail = 0;
1121					queue = SQ_SHARED_QUEUE;
1122					v &= ~LK_SHARED_WAITERS;
1123				}
1124				if (queue == SQ_EXCLUSIVE_QUEUE) {
1125					realexslp =
1126					    sleepq_sleepcnt(&lk->lock_object,
1127					    SQ_EXCLUSIVE_QUEUE);
1128					if (lk->lk_exslpfail >= realexslp) {
1129						lk->lk_exslpfail = 0;
1130						queue = SQ_SHARED_QUEUE;
1131						v &= ~LK_SHARED_WAITERS;
1132						if (realexslp != 0) {
1133							LOCK_LOG2(lk,
1134					"%s: %p has only LK_SLEEPFAIL sleepers",
1135							    __func__, lk);
1136							LOCK_LOG2(lk,
1137			"%s: %p waking up threads on the exclusive queue",
1138							    __func__, lk);
1139							wakeup_swapper =
1140							    sleepq_broadcast(
1141							    &lk->lock_object,
1142							    SLEEPQ_LK, 0,
1143							    SQ_EXCLUSIVE_QUEUE);
1144						}
1145					} else
1146						lk->lk_exslpfail = 0;
1147				}
1148				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1149					sleepq_release(&lk->lock_object);
1150					continue;
1151				}
1152				LOCK_LOG3(lk,
1153				"%s: %p waking up all threads on the %s queue",
1154				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1155				    "shared" : "exclusive");
1156				wakeup_swapper |= sleepq_broadcast(
1157				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1158
1159				/*
1160				 * If shared waiters have been woken up we need
1161				 * to wait for one of them to acquire the lock
1162				 * before to set the exclusive waiters in
1163				 * order to avoid a deadlock.
1164				 */
1165				if (queue == SQ_SHARED_QUEUE) {
1166					for (v = lk->lk_lock;
1167					    (v & LK_SHARE) && !LK_SHARERS(v);
1168					    v = lk->lk_lock)
1169						cpu_spinwait();
1170				}
1171			}
1172
1173			/*
1174			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1175			 * fail, loop back and retry.
1176			 */
1177			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1178				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1179				    x | LK_EXCLUSIVE_WAITERS)) {
1180					sleepq_release(&lk->lock_object);
1181					continue;
1182				}
1183				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1184				    __func__, lk);
1185			}
1186
1187			/*
1188			 * As far as we have been unable to acquire the
1189			 * exclusive lock and the exclusive waiters flag
1190			 * is set, we will sleep.
1191			 */
1192			if (flags & LK_INTERLOCK) {
1193				class->lc_unlock(ilk);
1194				flags &= ~LK_INTERLOCK;
1195			}
1196			GIANT_SAVE();
1197			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1198			    SQ_EXCLUSIVE_QUEUE);
1199			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1200			GIANT_RESTORE();
1201			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1202			    __func__, lk);
1203		}
1204
1205		if (error == 0) {
1206			lock_profile_obtain_lock_success(&lk->lock_object,
1207			    contested, waittime, file, line);
1208			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1209			    lk->lk_recurse, file, line);
1210			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1211			    LK_TRYWIT(flags), file, line);
1212			TD_LOCKS_INC(curthread);
1213			STACK_SAVE(lk);
1214		}
1215		break;
1216	default:
1217		if (flags & LK_INTERLOCK)
1218			class->lc_unlock(ilk);
1219		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1220	}
1221
1222	if (flags & LK_INTERLOCK)
1223		class->lc_unlock(ilk);
1224	if (wakeup_swapper)
1225		kick_proc0();
1226
1227	return (error);
1228}
1229
1230void
1231_lockmgr_disown(struct lock *lk, const char *file, int line)
1232{
1233	uintptr_t tid, x;
1234
1235	if (SCHEDULER_STOPPED())
1236		return;
1237
1238	tid = (uintptr_t)curthread;
1239	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1240
1241	/*
1242	 * If the owner is already LK_KERNPROC just skip the whole operation.
1243	 */
1244	if (LK_HOLDER(lk->lk_lock) != tid)
1245		return;
1246	lock_profile_release_lock(&lk->lock_object);
1247	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1248	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1249	TD_LOCKS_DEC(curthread);
1250	STACK_SAVE(lk);
1251
1252	/*
1253	 * In order to preserve waiters flags, just spin.
1254	 */
1255	for (;;) {
1256		x = lk->lk_lock;
1257		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1258		x &= LK_ALL_WAITERS;
1259		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1260		    LK_KERNPROC | x))
1261			return;
1262		cpu_spinwait();
1263	}
1264}
1265
1266void
1267lockmgr_printinfo(const struct lock *lk)
1268{
1269	struct thread *td;
1270	uintptr_t x;
1271
1272	if (lk->lk_lock == LK_UNLOCKED)
1273		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1274	else if (lk->lk_lock & LK_SHARE)
1275		printf("lock type %s: SHARED (count %ju)\n",
1276		    lk->lock_object.lo_name,
1277		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1278	else {
1279		td = lockmgr_xholder(lk);
1280		printf("lock type %s: EXCL by thread %p "
1281		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1282		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1283	}
1284
1285	x = lk->lk_lock;
1286	if (x & LK_EXCLUSIVE_WAITERS)
1287		printf(" with exclusive waiters pending\n");
1288	if (x & LK_SHARED_WAITERS)
1289		printf(" with shared waiters pending\n");
1290	if (x & LK_EXCLUSIVE_SPINNERS)
1291		printf(" with exclusive spinners pending\n");
1292
1293	STACK_PRINT(lk);
1294}
1295
1296int
1297lockstatus(const struct lock *lk)
1298{
1299	uintptr_t v, x;
1300	int ret;
1301
1302	ret = LK_SHARED;
1303	x = lk->lk_lock;
1304	v = LK_HOLDER(x);
1305
1306	if ((x & LK_SHARE) == 0) {
1307		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1308			ret = LK_EXCLUSIVE;
1309		else
1310			ret = LK_EXCLOTHER;
1311	} else if (x == LK_UNLOCKED)
1312		ret = 0;
1313
1314	return (ret);
1315}
1316
1317#ifdef INVARIANT_SUPPORT
1318
1319FEATURE(invariant_support,
1320    "Support for modules compiled with INVARIANTS option");
1321
1322#ifndef INVARIANTS
1323#undef	_lockmgr_assert
1324#endif
1325
1326void
1327_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1328{
1329	int slocked = 0;
1330
1331	if (panicstr != NULL)
1332		return;
1333	switch (what) {
1334	case KA_SLOCKED:
1335	case KA_SLOCKED | KA_NOTRECURSED:
1336	case KA_SLOCKED | KA_RECURSED:
1337		slocked = 1;
1338	case KA_LOCKED:
1339	case KA_LOCKED | KA_NOTRECURSED:
1340	case KA_LOCKED | KA_RECURSED:
1341#ifdef WITNESS
1342
1343		/*
1344		 * We cannot trust WITNESS if the lock is held in exclusive
1345		 * mode and a call to lockmgr_disown() happened.
1346		 * Workaround this skipping the check if the lock is held in
1347		 * exclusive mode even for the KA_LOCKED case.
1348		 */
1349		if (slocked || (lk->lk_lock & LK_SHARE)) {
1350			witness_assert(&lk->lock_object, what, file, line);
1351			break;
1352		}
1353#endif
1354		if (lk->lk_lock == LK_UNLOCKED ||
1355		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1356		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1357			panic("Lock %s not %slocked @ %s:%d\n",
1358			    lk->lock_object.lo_name, slocked ? "share" : "",
1359			    file, line);
1360
1361		if ((lk->lk_lock & LK_SHARE) == 0) {
1362			if (lockmgr_recursed(lk)) {
1363				if (what & KA_NOTRECURSED)
1364					panic("Lock %s recursed @ %s:%d\n",
1365					    lk->lock_object.lo_name, file,
1366					    line);
1367			} else if (what & KA_RECURSED)
1368				panic("Lock %s not recursed @ %s:%d\n",
1369				    lk->lock_object.lo_name, file, line);
1370		}
1371		break;
1372	case KA_XLOCKED:
1373	case KA_XLOCKED | KA_NOTRECURSED:
1374	case KA_XLOCKED | KA_RECURSED:
1375		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1376			panic("Lock %s not exclusively locked @ %s:%d\n",
1377			    lk->lock_object.lo_name, file, line);
1378		if (lockmgr_recursed(lk)) {
1379			if (what & KA_NOTRECURSED)
1380				panic("Lock %s recursed @ %s:%d\n",
1381				    lk->lock_object.lo_name, file, line);
1382		} else if (what & KA_RECURSED)
1383			panic("Lock %s not recursed @ %s:%d\n",
1384			    lk->lock_object.lo_name, file, line);
1385		break;
1386	case KA_UNLOCKED:
1387		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1388			panic("Lock %s exclusively locked @ %s:%d\n",
1389			    lk->lock_object.lo_name, file, line);
1390		break;
1391	default:
1392		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1393		    line);
1394	}
1395}
1396#endif
1397
1398#ifdef DDB
1399int
1400lockmgr_chain(struct thread *td, struct thread **ownerp)
1401{
1402	struct lock *lk;
1403
1404	lk = td->td_wchan;
1405
1406	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1407		return (0);
1408	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1409	if (lk->lk_lock & LK_SHARE)
1410		db_printf("SHARED (count %ju)\n",
1411		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1412	else
1413		db_printf("EXCL\n");
1414	*ownerp = lockmgr_xholder(lk);
1415
1416	return (1);
1417}
1418
1419static void
1420db_show_lockmgr(const struct lock_object *lock)
1421{
1422	struct thread *td;
1423	const struct lock *lk;
1424
1425	lk = (const struct lock *)lock;
1426
1427	db_printf(" state: ");
1428	if (lk->lk_lock == LK_UNLOCKED)
1429		db_printf("UNLOCKED\n");
1430	else if (lk->lk_lock & LK_SHARE)
1431		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1432	else {
1433		td = lockmgr_xholder(lk);
1434		if (td == (struct thread *)LK_KERNPROC)
1435			db_printf("XLOCK: LK_KERNPROC\n");
1436		else
1437			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1438			    td->td_tid, td->td_proc->p_pid,
1439			    td->td_proc->p_comm);
1440		if (lockmgr_recursed(lk))
1441			db_printf(" recursed: %d\n", lk->lk_recurse);
1442	}
1443	db_printf(" waiters: ");
1444	switch (lk->lk_lock & LK_ALL_WAITERS) {
1445	case LK_SHARED_WAITERS:
1446		db_printf("shared\n");
1447		break;
1448	case LK_EXCLUSIVE_WAITERS:
1449		db_printf("exclusive\n");
1450		break;
1451	case LK_ALL_WAITERS:
1452		db_printf("shared and exclusive\n");
1453		break;
1454	default:
1455		db_printf("none\n");
1456	}
1457	db_printf(" spinners: ");
1458	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1459		db_printf("exclusive\n");
1460	else
1461		db_printf("none\n");
1462}
1463#endif
1464