/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 182010 2008-08-22 16:14:23Z jhb $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_CANRECURSE | LK_NOSHARE));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

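/*
 * Giant handling around lockmgr sleeps: GIANT_SAVE() drops Giant (recording
 * how many times it was recursively held) before blocking, and
 * GIANT_RESTORE() reacquires it the same number of times afterwards,
 * saving and restoring the WITNESS state across the sleep.
 */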
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

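/*
 * A shared request can be granted only when the lock is not held in
 * exclusive mode and either no exclusive waiters are queued, the requesting
 * thread already holds shared lockmgr locks, or it asked for the
 * deadlock-avoidance treatment (TDP_DEADLKTREAT); otherwise pending writers
 * get priority.
 */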
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

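/*
 * The lock word holds the owner thread pointer for an exclusive lock, so
 * ownership checks simply compare it (with the flag bits masked off)
 * against curthread, or against LK_KERNPROC for a disowned lock.
 */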
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	 assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	 db_show_lockmgr(struct lock_object *lock);
#endif
static void	 lock_lockmgr(struct lock_object *lock, int how);
static int	 unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr
};

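/*
 * Return the thread owning an exclusive lock, or NULL if the lock is held
 * in shared mode (or not held at all).
 */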
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function expects the sleepqueue chain lock to be held on entry and
 * returns with it released.  It also assumes the generic interlock is sane
 * and has already been checked by the caller.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the appropriate sleep primitive for the real sleep.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

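/*
 * Release one shared lock reference.  If this drops the last shared hold,
 * wake up the threads sleeping on the lock, preferring the exclusive queue.
 * Returns nonzero if proc0 (the swapper) must be woken via kick_proc0().
 */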
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

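/*
 * Initialize a lockmgr lock: translate the LK_* init flags into lock_object
 * flags, record the default priority and timeout used for sleeps, and
 * register the lock with the lockmgr lock class.
 *
 * A minimal usage sketch (hypothetical caller; default message, priority
 * and timeout assumed):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */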
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);

	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}

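/*
 * Core of the lockmgr interface: perform the requested operation (shared,
 * exclusive, upgrade, downgrade, release or drain) on behalf of the caller,
 * honoring the per-call flags, optional interlock, wait message, priority
 * and timeout.  Returns 0 on success or an errno value on failure.
 */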
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	uint64_t waittime;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int contested, error, ipri, itimo, queue, wakeup_swapper;

	contested = 0;
	error = 0;
	waittime = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the caller is not allowed to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = lk->lk_lock & LK_ALL_WAITERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * The upgrade failed, so just give up the shared lock and
		 * fall through to a full exclusive acquisition.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {

				/*
				 * If the caller asked for a try operation,
				 * just give up and return instead of
				 * panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the caller is not allowed to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			if (x == (LK_UNLOCKED | v)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock & LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock & LK_ALL_WAITERS;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners up.
			 */
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS(x == LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the caller is not allowed to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			if (x == (LK_UNLOCKED | v)) {
				v = x;
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters flag in
				 * order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

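/*
 * Pass ownership of an exclusively held lock to LK_KERNPROC so that it can
 * later be released by a different thread.  The lock must be held
 * exclusively and not recursed.
 */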
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock & LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

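/*
 * Print the current state of the lock (owner or sharer count and any
 * pending waiters) for diagnostic output.
 */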
void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf(" lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf(" lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");

	STACK_PRINT(lk);
}

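/*
 * Report how the lock is currently held: LK_EXCLUSIVE if owned by curthread
 * (or disowned to LK_KERNPROC), LK_EXCLOTHER if owned by another thread,
 * LK_SHARED if held in shared mode, or 0 if unlocked.
 */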
int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

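/*
 * Back end for the KA_* lockmgr assertions: panic if the lock is not held
 * (or is held) in the way the caller asserted.
 */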
void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
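/*
 * DDB support: if the thread is blocked on a lockmgr lock, report it and
 * return the owner of an exclusive lock so the lock chain can be followed.
 */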
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
}
#endif