/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 233628 2012-03-28 20:58:30Z fabient $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
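
/*
 * The GIANT_* macros bracket code that may sleep or spin while Giant is
 * held, possibly recursively.  A minimal sketch of the pattern used by the
 * sleeping and spinning paths below (the elided part is the sleepqueue
 * bookkeeping):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(fully drop Giant, recording the depth in _i)
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();	(reacquire Giant _i times)
 */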

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif
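
/*
 * Both tunables above are read-write sysctls, so on a kernel built with
 * the ADAPTIVE_LOCKMGRS option the adaptive spinning behaviour can be
 * adjusted at runtime, e.g. (the values here are arbitrary examples):
 *
 *	sysctl debug.lockmgr.retries=20
 *	sysctl debug.lockmgr.loops=20000
 */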

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Assumes the sleepqueue chain lock is held on entry and returns with it
 * released.  Also assumes the generic interlock is sane and was previously
 * checked.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the requested timeout and
	 * signal-catching semantics.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
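
/*
 * A sketch of the contract between sleeplk() and its callers in
 * __lockmgr_args() below: the sleepqueue chain lock must already be held
 * and the relevant waiters flag already set, so a typical caller looks
 * like this (error handling elided):
 *
 *	sleepq_lock(&lk->lock_object);
 *	x = lk->lk_lock;
 *	... set LK_SHARED_WAITERS or LK_EXCLUSIVE_WAITERS in lk_lock ...
 *	error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, queue);
 *
 * The chain lock is dropped as part of the sleepq_wait*() call, so it is
 * not held on return.
 */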

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail might be considered an upper-limit
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
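
/*
 * As the assertions above require, these helpers may only be called with
 * the lock held exclusively.  A hypothetical caller flipping a vnode-style
 * lock to allow recursion would look like:
 *
 *	lockmgr(lk, LK_EXCLUSIVE, NULL);
 *	lockallowrecurse(lk);
 *	lockmgr(lk, LK_RELEASE, NULL);
 */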

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
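
/*
 * A minimal lifecycle sketch for a lockmgr lock, assuming a hypothetical
 * softc with an embedded "struct lock lk" member; the wait message and
 * priority here are illustrative:
 *
 *	lockinit(&sc->lk, PVFS, "examplk", 0, 0);
 *	...
 *	lockmgr(&sc->lk, LK_EXCLUSIVE, NULL);
 *	... exclusive section ...
 *	lockmgr(&sc->lk, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&sc->lk);	(must be unheld, as asserted above)
 */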

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock,
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, bail out to avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  Two states must be handled here because,
			 * after a failed acquisition, the lock can be either
			 * held in exclusive mode or in shared mode (due to
			 * the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to upgrade, so just give up the
		 * shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * For try operations, fail with EBUSY rather
				 * than panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving the
			 * waiters flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail might be considered an upper-limit
			 * bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we will
			 * probably need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * be lying about the real number of waiters
				 * with the LK_SLEEPFAIL flag on because they
				 * may be used in conjunction with
				 * interruptible sleeps, so lk_exslpfail might
				 * be considered an upper-limit bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
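
/*
 * A hedged sketch of a typical call through the lockmgr() front-end macro
 * (which expands to a call with the default wmesg/pri/timo arguments):
 * acquiring a lock shared while atomically dropping an interlock mutex,
 * as done by callers that pass LK_INTERLOCK.  The "sc" softc and its
 * fields are illustrative, not from this file:
 *
 *	mtx_lock(&sc->mtx);
 *	... inspect state protected by the interlock ...
 *	error = lockmgr(&sc->lk, LK_SHARED | LK_INTERLOCK,
 *	    &sc->mtx.lock_object);
 *	(on return the interlock has been released in every case)
 *	if (error == 0)
 *		lockmgr(&sc->lk, LK_RELEASE, NULL);
 */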

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC, just skip the whole
	 * operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
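
/*
 * Disowning hands a held exclusive lock over to "the kernel"
 * (LK_KERNPROC) so that another context may later release it; the buffer
 * cache uses this for buffers whose I/O completes in a different thread.
 * A hedged sketch, using the lockmgr_disown() wrapper macro for the
 * function above; "bp->lk" is an illustrative field name:
 *
 *	lockmgr(&bp->lk, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&bp->lk);	(owner becomes LK_KERNPROC)
 *	... later, possibly from another thread ...
 *	lockmgr(&bp->lk, LK_RELEASE, NULL);
 */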

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
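
/*
 * lockstatus() is an unlocked snapshot, so the answer may be stale by the
 * time the caller acts on it; it is mostly useful for assertions and
 * diagnostics.  A hedged sketch of a caller distinguishing its own
 * exclusive hold from someone else's:
 *
 *	switch (lockstatus(&sc->lk)) {
 *	case LK_EXCLUSIVE:	(held exclusively by curthread or disowned)
 *	case LK_EXCLOTHER:	(held exclusively by another thread)
 *	case LK_SHARED:		(held in shared mode)
 *	case 0:			(unlocked)
 *		break;
 *	}
 */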

#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
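
/*
 * The assertions above back the lockmgr_assert() macro, which compiles to
 * nothing unless the kernel is built with INVARIANTS.  A hedged example of
 * guarding a function that requires its caller to hold the lock
 * exclusively and unrecursed ("example_softc" is illustrative):
 *
 *	static void
 *	example_modify(struct example_softc *sc)
 *	{
 *
 *		lockmgr_assert(&sc->lk, KA_XLOCKED | KA_NOTRECURSED);
 *		... mutate state protected by sc->lk ...
 *	}
 */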

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif