/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 192853 2009-05-26 20:28:22Z sson $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_CANRECURSE | LK_NOSHARE));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

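/*
 * A sketch of how the Giant macros above pair up around a sleep point
 * (the body shown is illustrative, not a real call site):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		release Giant, remembering the recursion depth
 *	sleepq_wait(...);	sleep without Giant held
 *	GIANT_RESTORE();	reacquire Giant the saved number of times
 */

/*
 * A shared request can be granted while no thread holds the lock
 * exclusively, provided that either no exclusive waiter is pending or the
 * requesting thread already holds shared locks or runs with
 * deadlock-avoidance treatment (TDP_DEADLKTREAT).
 */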
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

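/*
 * The lock word stores either the owning thread pointer (exclusive mode)
 * or a sharer count tagged with LK_SHARE.  These predicates mask off the
 * waiters bits and compare what remains against LK_KERNPROC or curthread.
 */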
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	 assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
static void	 db_show_lockmgr(struct lock_object *lock);
#endif
static void	 lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	 owner_lockmgr(struct lock_object *lock, struct thread **owner);
#endif
static int	 unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This routine assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is
 * sane and has previously been checked.  If LK_INTERLOCK is specified,
 * the interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Choose the sleep primitive according to the timeout and
	 * PCATCH settings.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

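/*
 * Release a shared lock held by curthread, waking up waiters as needed.
 * Returns nonzero if proc0 (the swapper) must be kicked; the caller is
 * expected to invoke kick_proc0() once the lock operation is complete.
 */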
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);

	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}

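/*
 * Callers normally reach __lockmgr_args() through the lockmgr() and
 * lockmgr_args() wrappers in <sys/lockmgr.h>.  A sketch of a full life
 * cycle (the lock name, priority and flags below are illustrative):
 *
 *	struct lock exlk;
 *
 *	lockinit(&exlk, PVFS, "exlk", 0, LK_CANRECURSE);
 *	lockmgr(&exlk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&exlk, LK_RELEASE, NULL);
 *	lockdestroy(&exlk);
 */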
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

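	/*
	 * A sketch of the LK_INTERLOCK contract (the mutex "vlock" below is
	 * illustrative): the interlock is held on entry and is always
	 * released before this function sleeps or returns.
	 *
	 *	mtx_lock(&vlock);
	 *	...examine state protected by vlock...
	 *	lockmgr(&exlk, LK_EXCLUSIVE | LK_INTERLOCK, &vlock);
	 *	...vlock is unlocked here, exlk is held exclusively...
	 */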
	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread holds an exclusive lock and
			 * sharing is currently permitted, bump the count
			 * of sharers.  Since we have to preserve the state
			 * of the waiters, if we fail to acquire the shared
			 * lock loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * will probably need to manipulate the waiters
			 * flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = lk->lk_lock & LK_ALL_WAITERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to upgrade, so just give up the
		 * shared lock and fall through to an exclusive acquisition.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {

				/*
				 * If this was a try operation, just give up
				 * and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * will probably need to manipulate the waiters
			 * flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring
			 * the lock, claim ownership and return, preserving
			 * the waiters flags.
			 */
			if (x == (LK_UNLOCKED | v)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock & LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock & LK_ALL_WAITERS;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 */
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS(x == LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * will probably need to manipulate the waiters
			 * flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			if (x == (LK_UNLOCKED | v)) {
				v = x;
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

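/*
 * A sketch of the disown hand-off (names are illustrative): the acquiring
 * thread passes ownership to the kernel so that a different context may
 * release the lock later, much as the buffer cache does for asynchronous
 * writes.
 *
 *	lockmgr(&exlk, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&exlk);
 *	...
 *	lockmgr(&exlk, LK_RELEASE, NULL);	possibly from another thread
 */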
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock & LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");

	STACK_PRINT(lk);
}

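/*
 * Report how the lock is held relative to curthread: LK_EXCLUSIVE if it
 * is owned by curthread or disowned (LK_KERNPROC), LK_EXCLOTHER if it is
 * owned by another thread, LK_SHARED if it is held in shared mode and 0
 * if it is unlocked.  An example assertion (a sketch):
 *
 *	KASSERT(lockstatus(&exlk) == LK_EXCLUSIVE,
 *	    ("exlk not exclusively locked"));
 */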
int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
}
#endif