/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
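
/*
 * Example usage (illustrative sketch only; "data_lock" is a placeholder
 * name, not part of this file):
 *
 *	struct sx data_lock;
 *
 *	sx_init(&data_lock, "data lock");
 *
 *	sx_slock(&data_lock);		read side: many holders at once
 *	(read shared data)
 *	sx_sunlock(&data_lock);
 *
 *	sx_xlock(&data_lock);		write side: exclusive access
 *	(modify shared data)
 *	sx_xunlock(&data_lock);
 */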
38
39#include "opt_ddb.h"
40#include "opt_hwpmc_hooks.h"
41#include "opt_no_adaptive_sx.h"
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_sx.c 303953 2016-08-11 09:28:49Z mjg $");
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/kdb.h>
49#include <sys/kernel.h>
50#include <sys/ktr.h>
51#include <sys/lock.h>
52#include <sys/mutex.h>
53#include <sys/proc.h>
54#include <sys/sched.h>
55#include <sys/sleepqueue.h>
56#include <sys/sx.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
61#include <machine/cpu.h>
62#endif
63
64#ifdef DDB
65#include <ddb/ddb.h>
66#endif
67
68#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
69#define	ADAPTIVE_SX
70#endif
71
72CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
73
74#ifdef HWPMC_HOOKS
75#include <sys/pmckern.h>
76PMC_SOFT_DECLARE( , , lock, failed);
77#endif
78
79/* Handy macros for sleep queues. */
80#define	SQ_EXCLUSIVE_QUEUE	0
81#define	SQ_SHARED_QUEUE		1
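
/*
 * A brief sketch of the sx_lock word encoding, summarized here from
 * sys/sx.h (see that header for the authoritative definitions): the low
 * bits are flags (SX_LOCK_SHARED, SX_LOCK_SHARED_WAITERS,
 * SX_LOCK_EXCLUSIVE_WAITERS, SX_LOCK_RECURSED) and the remaining bits hold
 * either the count of sharers (shared mode, extracted with SX_SHARERS())
 * or the owning thread pointer (exclusive mode, extracted with
 * SX_OWNER()).  All state transitions below are performed with atomic
 * compare-and-set operations on this single word.
 */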

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
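
/*
 * Illustrative sketch of how the three macros above cooperate inside the
 * hard-case routines later in this file:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		fully release Giant before spinning/sleeping
 *	(spin or sleep here)
 *	...
 *	GIANT_RESTORE();	reacquire Giant to its former recursion depth
 */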

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static u_int asx_retries = 10;
static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config sx_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_delay_sysinit(void *dummy)
{

	sx_delay.initial = mp_ncpus * 25;
	sx_delay.step = (mp_ncpus * 25) / 2;
	sx_delay.min = mp_ncpus * 5;
	sx_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(sx_delay_sysinit);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}
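
/*
 * Note on the "how" convention above (a sketch matching the code as
 * written, not a normative statement of the lock_class contract):
 * unlock_sx() returns 1 when it released a shared lock and 0 for an
 * exclusive one, and lock_sx() accepts that same value to restore the
 * original mode.  Generic consumers drive this round trip through the
 * lock class, e.g.:
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);
 *	(sleep without the lock held)
 *	LOCK_CLASS(lock)->lc_lock(lock, how);
 */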

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx = (const struct sx *)lock;
	uintptr_t x = sx->sx_lock;

	*owner = (struct thread *)SX_OWNER(x);
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}
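
/*
 * Example lifecycle (illustrative only; "foo_lock" is a placeholder):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init_flags(&foo_lock, "foo lock", SX_RECURSE);
 *	...
 *	sx_xlock(&foo_lock);
 *	sx_xlock(&foo_lock);		recursion allowed by SX_RECURSE
 *	sx_xunlock(&foo_lock);
 *	sx_xunlock(&foo_lock);
 *	...
 *	sx_destroy(&foo_lock);		lock must be unheld here
 *
 * Locks needed at boot can instead be declared with SX_SYSINIT() from
 * sys/sx.h, which arranges for sx_sysinit() above to run during startup.
 */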

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
	error = __sx_slock(sx, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}
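
/*
 * Illustrative caller-side handling of SX_INTERRUPTIBLE, which is
 * normally requested through the sx_slock_sig()/sx_xlock_sig() wrappers
 * in sys/sx.h; the acquisition can then fail with EINTR or ERESTART:
 *
 *	error = sx_slock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);		interrupted; lock is not held
 *	...
 *	sx_sunlock(&foo_lock);
 */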

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	error = __sx_xlock(sx, curthread, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) &&
	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!sx_recursed(sx))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}
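
/*
 * Illustrative trylock usage (a sketch; foo_lock is a placeholder): the
 * try variants never sleep, so they are useful when blocking is not an
 * option:
 *
 *	if (sx_try_xlock(&foo_lock)) {
 *		(update data)
 *		sx_xunlock(&foo_lock);
 *	} else
 *		(defer or skip the work)
 */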

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
	__sx_sunlock(sx, file, line);
	TD_LOCKS_DEC(curthread);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
	__sx_xunlock(sx, curthread, file, line);
	TD_LOCKS_DEC(curthread);
}
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}
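
/*
 * Illustrative upgrade pattern (a sketch; foo_lock is a placeholder):
 * since the upgrade succeeds only for a sole sharer, callers must be
 * ready to fall back to a full drop and reacquire, revalidating any
 * state examined under the shared lock:
 *
 *	sx_slock(&foo_lock);
 *	if (must_modify && !sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		(recheck: state may have changed while unlocked)
 *	}
 */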

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wake up any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);

	if (wakeup_swapper)
		kick_proc0();
}
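
/*
 * Illustrative downgrade pattern: a writer that has finished modifying
 * data but still needs a consistent view can trade exclusive for shared
 * access without ever fully releasing the lock:
 *
 *	sx_xlock(&foo_lock);
 *	(modify data)
 *	sx_downgrade(&foo_lock);
 *	(keep reading data; other readers may now enter)
 *	sx_sunlock(&foo_lock);
 */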

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef	KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	/* If we already hold an exclusive lock, then recurse. */
	if (sx_xlocked(sx)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = sx->sx_lock;
#endif
	for (;;) {
		if (sx->sx_lock == SX_LOCK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		x = sx->sx_lock;
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				x = SX_OWNER(x);
				owner = (struct thread *)x;
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE();
					while (SX_OWNER(sx->sx_lock) == x &&
					    TD_IS_RUNNING(owner))
						lock_delay(&lda);
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					x = sx->sx_lock;
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					lda.spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			continue;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wakeup algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present, in which case we have to preserve the state of the
	 * exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and cleaning up the shared waiters bit
	 * anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
#ifdef KDTRACE_HOOKS
	state = sx->sx_lock;
	all_time -= lockstat_nsecs(&sx->lock_object);
#endif

	/*
	 * As with rwlocks, we make no attempt to prevent new shared
	 * locks from being granted once there is an exclusive waiter.
	 */
	for (;;) {
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
		x = sx->sx_lock;

		/*
		 * If no other thread has an exclusive lock then try to bump up
		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		x = sx->sx_lock;

		/*
		 * We should never have waiting sharers while at least one
		 * thread holds a shared lock: shared requests are granted
		 * immediately in that state.
		 */
		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)x,
					    (void *)(x - SX_ONE_SHARER));
				break;
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(x == SX_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				break;
			}
			continue;
		}

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * The wakeup semantics here are quite simple: just wake
		 * up all the exclusive waiters.  Note that the state of
		 * the lock could have changed, so if the cmpset fails,
		 * loop back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on"
			    " exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */
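
/*
 * Illustrative use of the assertion interface (foo_lock is a
 * placeholder): callers typically assert their locking contract on
 * entry to functions that require a particular lock state, e.g.:
 *
 *	sx_assert(&foo_lock, SA_XLOCKED);
 *	(the exclusive lock is known to be held past this point)
 */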

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch (sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif