1/*-
2 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
3 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice(s), this list of conditions and the following disclaimer as
11 * the first lines of this file unmodified other than the possible
12 * addition of one or more copyright notices.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice(s), this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27 * DAMAGE.
28 */
29
30/*
31 * Shared/exclusive locks. This implementation attempts to ensure
32 * deterministic lock granting behavior, so that slocks and xlocks are
33 * interleaved.
34 *
35 * Priority propagation will not generally raise the priority of lock holders,
36 * so should not be relied upon in combination with sx locks.
37 */
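/*
 * Basic usage of the interface implemented in this file, as an
 * illustrative sketch ("foo_lock" is a hypothetical caller-side name,
 * not part of this file):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);		(shared/read access)
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		(exclusive/write access)
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);
 */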
38
39#include "opt_ddb.h"
40#include "opt_hwpmc_hooks.h"
41#include "opt_no_adaptive_sx.h"
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: stable/11/sys/kern/kern_sx.c 327409 2017-12-31 03:35:34Z mjg $");
45
46#include <sys/param.h>
47#include <sys/systm.h>
48#include <sys/kdb.h>
49#include <sys/kernel.h>
50#include <sys/ktr.h>
51#include <sys/lock.h>
52#include <sys/mutex.h>
53#include <sys/proc.h>
54#include <sys/sched.h>
55#include <sys/sleepqueue.h>
56#include <sys/sx.h>
57#include <sys/smp.h>
58#include <sys/sysctl.h>
59
60#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
61#include <machine/cpu.h>
62#endif
63
64#ifdef DDB
65#include <ddb/ddb.h>
66#endif
67
68#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
69#define ADAPTIVE_SX
70#endif
71
72CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
73
74#ifdef HWPMC_HOOKS
75#include <sys/pmckern.h>
76PMC_SOFT_DECLARE( , , lock, failed);
77#endif
78
79/* Handy macros for sleep queues. */
80#define SQ_EXCLUSIVE_QUEUE 0
81#define SQ_SHARED_QUEUE 1
82
83/*
84 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
85 * drop Giant anytime we have to sleep or if we adaptively spin.
86 */
87#define GIANT_DECLARE \
88 int _giantcnt = 0; \
89 WITNESS_SAVE_DECL(Giant) \
90
91#define GIANT_SAVE() do { \
92 if (mtx_owned(&Giant)) { \
93 WITNESS_SAVE(&Giant.lock_object, Giant); \
94 while (mtx_owned(&Giant)) { \
95 _giantcnt++; \
96 mtx_unlock(&Giant); \
97 } \
98 } \
99} while (0)
100
101#define GIANT_RESTORE() do { \
102 if (_giantcnt > 0) { \
103 mtx_assert(&Giant, MA_NOTOWNED); \
104 while (_giantcnt--) \
105 mtx_lock(&Giant); \
106 WITNESS_RESTORE(&Giant.lock_object, Giant); \
107 } \
108} while (0)
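/*
 * The three macros above are used together in the contested paths below;
 * a condensed sketch of the pattern (not literal code from this file):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(record Giant's recursion count and drop it
 *				 fully before spinning or sleeping)
 *	sleepq_wait(...);
 *	...
 *	GIANT_RESTORE();	(reacquire Giant the recorded number of times)
 */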
109
110/*
111 * Returns true if an exclusive lock is recursed. It assumes
112 * curthread currently has an exclusive lock.
113 */
114#define sx_recursed(sx) ((sx)->sx_recurse != 0)
115
116static void assert_sx(const struct lock_object *lock, int what);
117#ifdef DDB
118static void db_show_sx(const struct lock_object *lock);
119#endif
120static void lock_sx(struct lock_object *lock, uintptr_t how);
121#ifdef KDTRACE_HOOKS
122static int owner_sx(const struct lock_object *lock, struct thread **owner);
123#endif
124static uintptr_t unlock_sx(struct lock_object *lock);
125
126struct lock_class lock_class_sx = {
127 .lc_name = "sx",
128 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
129 .lc_assert = assert_sx,
130#ifdef DDB
131 .lc_ddb_show = db_show_sx,
132#endif
133 .lc_lock = lock_sx,
134 .lc_unlock = unlock_sx,
135#ifdef KDTRACE_HOOKS
136 .lc_owner = owner_sx,
137#endif
138};
139
140#ifndef INVARIANTS
141#define _sx_assert(sx, what, file, line)
142#endif
143
144#ifdef ADAPTIVE_SX
145static __read_frequently u_int asx_retries = 10;
146static __read_frequently u_int asx_loops = 10000;
147static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
148SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
149SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
150
151static struct lock_delay_config __read_frequently sx_delay;
152
153SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
154 0, "");
155SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
156 0, "");
157
158LOCK_DELAY_SYSINIT_DEFAULT(sx_delay);
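/*
 * The knobs above are exported under debug.sx and can be inspected or
 * tuned at runtime; for example (a sketch, the values are arbitrary):
 *
 *	# sysctl debug.sx.retries debug.sx.loops
 *	# sysctl debug.sx.delay_base=1 debug.sx.delay_max=1024
 */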
159#endif
160
161void
162assert_sx(const struct lock_object *lock, int what)
163{
164
165 sx_assert((const struct sx *)lock, what);
166}
167
168void
169lock_sx(struct lock_object *lock, uintptr_t how)
170{
171 struct sx *sx;
172
173 sx = (struct sx *)lock;
174 if (how)
175 sx_slock(sx);
176 else
177 sx_xlock(sx);
178}
179
180uintptr_t
181unlock_sx(struct lock_object *lock)
182{
183 struct sx *sx;
184
185 sx = (struct sx *)lock;
186 sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
187 if (sx_xlocked(sx)) {
188 sx_xunlock(sx);
189 return (0);
190 } else {
191 sx_sunlock(sx);
192 return (1);
193 }
194}
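/*
 * lock_sx() and unlock_sx() are the lock class methods that let generic
 * sleep code drop an sx lock and later reacquire it in the same mode
 * (unlock_sx() returns the "how" value that lock_sx() consumes).  This
 * is what makes constructs such as the following work (a sketch; the
 * names are hypothetical):
 *
 *	sx_xlock(&foo_lock);
 *	while (!condition)
 *		cv_wait(&foo_cv, &foo_lock);
 *	sx_xunlock(&foo_lock);
 */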
195
196#ifdef KDTRACE_HOOKS
197int
198owner_sx(const struct lock_object *lock, struct thread **owner)
199{
200 const struct sx *sx = (const struct sx *)lock;
201 uintptr_t x = sx->sx_lock;
202
203 *owner = (struct thread *)SX_OWNER(x);
204 return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
205 (*owner != NULL));
206}
207#endif
208
209void
210sx_sysinit(void *arg)
211{
212 struct sx_args *sargs = arg;
213
214 sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
215}
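/*
 * sx_sysinit() is the SYSINIT handler behind the SX_SYSINIT() macro from
 * sys/sx.h, which initializes a lock at boot without an explicit call
 * site; for example (a sketch, the "foo" names are hypothetical):
 *
 *	static struct sx foo_lock;
 *	SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */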
216
217void
218sx_init_flags(struct sx *sx, const char *description, int opts)
219{
220 int flags;
221
222 MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
223 SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
224 ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
225 ("%s: sx_lock not aligned for %s: %p", __func__, description,
226 &sx->sx_lock));
227
228 flags = LO_SLEEPABLE | LO_UPGRADABLE;
229 if (opts & SX_DUPOK)
230 flags |= LO_DUPOK;
231 if (opts & SX_NOPROFILE)
232 flags |= LO_NOPROFILE;
233 if (!(opts & SX_NOWITNESS))
234 flags |= LO_WITNESS;
235 if (opts & SX_RECURSE)
236 flags |= LO_RECURSABLE;
237 if (opts & SX_QUIET)
238 flags |= LO_QUIET;
239 if (opts & SX_NEW)
240 flags |= LO_NEW;
241
242 flags |= opts & SX_NOADAPTIVE;
243 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
244 sx->sx_lock = SX_LOCK_UNLOCKED;
245 sx->sx_recurse = 0;
246}
247
248void
249sx_destroy(struct sx *sx)
250{
251
252 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
253 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
254 sx->sx_lock = SX_LOCK_DESTROYED;
255 lock_destroy(&sx->lock_object);
256}
257
258int
259sx_try_slock_(struct sx *sx, const char *file, int line)
260{
261 uintptr_t x;
262
263 if (SCHEDULER_STOPPED())
264 return (1);
265
266 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
267 ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
268 curthread, sx->lock_object.lo_name, file, line));
269
270 x = sx->sx_lock;
271 for (;;) {
272 KASSERT(x != SX_LOCK_DESTROYED,
273 ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
274 if (!(x & SX_LOCK_SHARED))
275 break;
276 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
277 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
278 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
279 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
280 sx, 0, 0, file, line, LOCKSTAT_READER);
281 TD_LOCKS_INC(curthread);
282 return (1);
283 }
284 }
285
286 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
287 return (0);
288}
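/*
 * Illustrative caller pattern for the try operation above (a sketch;
 * the names are hypothetical):
 *
 *	if (sx_try_slock(&foo_lock)) {
 *		... read the protected data ...
 *		sx_sunlock(&foo_lock);
 *	} else {
 *		... back off, e.g. defer the work or take a slow path ...
 *	}
 */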
289
290int
291_sx_xlock(struct sx *sx, int opts, const char *file, int line)
292{
293 uintptr_t tid, x;
294 int error = 0;
295
296 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
297 !TD_IS_IDLETHREAD(curthread),
298 ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
299 curthread, sx->lock_object.lo_name, file, line));
300 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
301 ("sx_xlock() of destroyed sx @ %s:%d", file, line));
302 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
303 line, NULL);
304 tid = (uintptr_t)curthread;
305 x = SX_LOCK_UNLOCKED;
306 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
307 error = _sx_xlock_hard(sx, x, tid, opts, file, line);
308 else
309 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
310 0, 0, file, line, LOCKSTAT_WRITER);
311 if (!error) {
312 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
313 file, line);
314 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
315 TD_LOCKS_INC(curthread);
316 }
317
318 return (error);
319}
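/*
 * With SX_INTERRUPTIBLE (the sx_xlock_sig() wrapper) the sleep in the
 * hard path can be aborted by a signal and the error is propagated to
 * the caller; a sketch of the resulting pattern (names hypothetical):
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);		(the lock was not acquired)
 *	...
 *	sx_xunlock(&foo_lock);
 */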
320
321int
322sx_try_xlock_(struct sx *sx, const char *file, int line)
323{
324 struct thread *td;
325 uintptr_t tid, x;
326 int rval;
327 bool recursed;
328
329 td = curthread;
330 tid = (uintptr_t)td;
331 if (SCHEDULER_STOPPED_TD(td))
332 return (1);
333
334 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
335 ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
336 curthread, sx->lock_object.lo_name, file, line));
337 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
338 ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
339
340 rval = 1;
341 recursed = false;
342 x = SX_LOCK_UNLOCKED;
343 for (;;) {
344 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
345 break;
346 if (x == SX_LOCK_UNLOCKED)
347 continue;
348 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
349 sx->sx_recurse++;
350 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
351 break;
352 }
353 rval = 0;
354 break;
355 }
356
357 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
358 if (rval) {
359 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
360 file, line);
361 if (!recursed)
362 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
363 sx, 0, 0, file, line, LOCKSTAT_WRITER);
364 TD_LOCKS_INC(curthread);
365 }
366
367 return (rval);
368}
369
370void
371_sx_xunlock(struct sx *sx, const char *file, int line)
372{
373
374 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
375 ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
376 _sx_assert(sx, SA_XLOCKED, file, line);
377 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
378 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
379 line);
380#if LOCK_DEBUG > 0
381 _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
382#else
383 __sx_xunlock(sx, curthread, file, line);
384#endif
385 TD_LOCKS_DEC(curthread);
386}
387
388/*
389 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
390 * This will only succeed if this thread holds a single shared lock.
391 * Return 1 if the upgrade succeeds, 0 otherwise.
392 */
393int
394sx_try_upgrade_(struct sx *sx, const char *file, int line)
395{
396 uintptr_t x;
397 int success;
398
399 if (SCHEDULER_STOPPED())
400 return (1);
401
402 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
403 ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
404 _sx_assert(sx, SA_SLOCKED, file, line);
405
406 /*
407 * Try to switch from one shared lock to an exclusive lock. We need
408 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
409 * we will wake up the exclusive waiters when we drop the lock.
410 */
411 x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
412 success = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
413 (uintptr_t)curthread | x);
414 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
415 if (success) {
416 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
417 file, line);
418 LOCKSTAT_RECORD0(sx__upgrade, sx);
419 }
420 return (success);
421}
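/*
 * A common caller pattern around the try-upgrade above (a sketch; the
 * names are hypothetical):
 *
 *	sx_slock(&foo_lock);
 *	if (change_needed && !sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		(the protected state may have changed while the lock was
 *		 dropped, so it must be revalidated here)
 *	}
 */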
422
423/*
424 * Downgrade an unrecursed exclusive lock into a single shared lock.
425 */
426void
427sx_downgrade_(struct sx *sx, const char *file, int line)
428{
429 uintptr_t x;
430 int wakeup_swapper;
431
432 if (SCHEDULER_STOPPED())
433 return;
434
435 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
436 ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
437 _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
438#ifndef INVARIANTS
439 if (sx_recursed(sx))
440 panic("downgrade of a recursed lock");
441#endif
442
443 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
444
445 /*
446 * Try to switch from an exclusive lock with no shared waiters
447 * to one sharer with no shared waiters. If there are
448 * exclusive waiters, we don't need to lock the sleep queue so
449 * long as we preserve the flag. We do one quick try and if
450 * that fails we grab the sleepq lock to keep the flags from
451 * changing and do it the slow way.
452 *
453 * We have to lock the sleep queue if there are shared waiters
454 * so we can wake them up.
455 */
456 x = sx->sx_lock;
457 if (!(x & SX_LOCK_SHARED_WAITERS) &&
458 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
459 (x & SX_LOCK_EXCLUSIVE_WAITERS)))
460 goto out;
461
462 /*
463 * Lock the sleep queue so we can read the waiters bits
464 * without any races and wakeup any shared waiters.
465 */
466 sleepq_lock(&sx->lock_object);
467
468 /*
469 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
470 * shared lock. If there are any shared waiters, wake them up.
471 */
472 wakeup_swapper = 0;
473 x = sx->sx_lock;
474 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
475 (x & SX_LOCK_EXCLUSIVE_WAITERS));
476 if (x & SX_LOCK_SHARED_WAITERS)
477 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
478 0, SQ_SHARED_QUEUE);
479 sleepq_release(&sx->lock_object);
480
481 if (wakeup_swapper)
482 kick_proc0();
483
484out:
485 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
486 LOCKSTAT_RECORD0(sx__downgrade, sx);
487}
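/*
 * Illustrative use of the downgrade above (a sketch; names hypothetical):
 *
 *	sx_xlock(&foo_lock);
 *	... modify the protected data ...
 *	sx_downgrade(&foo_lock);	(let other readers in)
 *	... keep reading the data ...
 *	sx_sunlock(&foo_lock);
 */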
488
489/*
490 * This function represents the so-called 'hard case' for sx_xlock
491 * operation. All 'easy case' failures are redirected to this. Note
492 * that ideally this would be a static function, but it needs to be
493 * accessible from at least sx.h.
494 */
495int
496_sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
497 const char *file, int line)
498{
499 GIANT_DECLARE;
500#ifdef ADAPTIVE_SX
501 volatile struct thread *owner;
502 u_int i, spintries = 0;
503#endif
504#ifdef LOCK_PROFILING
505 uint64_t waittime = 0;
506 int contested = 0;
507#endif
508 int error = 0;
509#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
510 struct lock_delay_arg lda;
511#endif
512#ifdef KDTRACE_HOOKS
513 uintptr_t state;
514 u_int sleep_cnt = 0;
515 int64_t sleep_time = 0;
516 int64_t all_time = 0;
517#endif
518
519 if (SCHEDULER_STOPPED())
520 return (0);
521
522#if defined(ADAPTIVE_SX)
523 lock_delay_arg_init(&lda, &sx_delay);
524#elif defined(KDTRACE_HOOKS)
525 lock_delay_arg_init(&lda, NULL);
526#endif
527
528 if (__predict_false(x == SX_LOCK_UNLOCKED))
529 x = SX_READ_VALUE(sx);
530
531 /* If we already hold an exclusive lock, then recurse. */
532 if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
533 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
534 ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
535 sx->lock_object.lo_name, file, line));
536 sx->sx_recurse++;
537 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
538 if (LOCK_LOG_TEST(&sx->lock_object, 0))
539 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
540 return (0);
541 }
542
543 if (LOCK_LOG_TEST(&sx->lock_object, 0))
544 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
545 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
546
547#ifdef KDTRACE_HOOKS
548 all_time -= lockstat_nsecs(&sx->lock_object);
549 state = x;
550#endif
551 for (;;) {
552 if (x == SX_LOCK_UNLOCKED) {
553 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
554 break;
555 continue;
556 }
557#ifdef KDTRACE_HOOKS
558 lda.spin_cnt++;
559#endif
560#ifdef HWPMC_HOOKS
561 PMC_SOFT_CALL( , , lock, failed);
562#endif
563 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
564 &waittime);
565#ifdef ADAPTIVE_SX
566 /*
567 * If the lock is write locked and the owner is
568 * running on another CPU, spin until the owner stops
569 * running or the state of the lock changes.
570 */
571 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
572 if ((x & SX_LOCK_SHARED) == 0) {
573 owner = lv_sx_owner(x);
574 if (TD_IS_RUNNING(owner)) {
575 if (LOCK_LOG_TEST(&sx->lock_object, 0))
576 CTR3(KTR_LOCK,
577 "%s: spinning on %p held by %p",
578 __func__, sx, owner);
579 KTR_STATE1(KTR_SCHED, "thread",
580 sched_tdname(curthread), "spinning",
581 "lockname:\"%s\"",
582 sx->lock_object.lo_name);
583 GIANT_SAVE();
584 do {
585 lock_delay(&lda);
586 x = SX_READ_VALUE(sx);
587 owner = lv_sx_owner(x);
588 } while (owner != NULL &&
589 TD_IS_RUNNING(owner));
590 KTR_STATE0(KTR_SCHED, "thread",
591 sched_tdname(curthread), "running");
592 continue;
593 }
594 } else if (SX_SHARERS(x) && spintries < asx_retries) {
595 KTR_STATE1(KTR_SCHED, "thread",
596 sched_tdname(curthread), "spinning",
597 "lockname:\"%s\"", sx->lock_object.lo_name);
598 GIANT_SAVE();
599 spintries++;
600 for (i = 0; i < asx_loops; i++) {
601 if (LOCK_LOG_TEST(&sx->lock_object, 0))
602 CTR4(KTR_LOCK,
603 "%s: shared spinning on %p with %u and %u",
604 __func__, sx, spintries, i);
605 x = sx->sx_lock;
606 if ((x & SX_LOCK_SHARED) == 0 ||
607 SX_SHARERS(x) == 0)
608 break;
609 cpu_spinwait();
610#ifdef KDTRACE_HOOKS
611 lda.spin_cnt++;
612#endif
613 }
614 KTR_STATE0(KTR_SCHED, "thread",
615 sched_tdname(curthread), "running");
616 x = SX_READ_VALUE(sx);
617 if (i != asx_loops)
618 continue;
619 }
620 }
621#endif
622
623 sleepq_lock(&sx->lock_object);
624 x = SX_READ_VALUE(sx);
625
626 /*
627 * If the lock was released while spinning on the
628 * sleep queue chain lock, try again.
629 */
630 if (x == SX_LOCK_UNLOCKED) {
631 sleepq_release(&sx->lock_object);
632 continue;
633 }
634
635#ifdef ADAPTIVE_SX
636 /*
637 * The current lock owner might have started executing
638 * on another CPU (or the lock could have changed
639 * owners) while we were waiting on the sleep queue
640 * chain lock. If so, drop the sleep queue lock and try
641 * again.
642 */
643 if (!(x & SX_LOCK_SHARED) &&
644 (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
645 owner = (struct thread *)SX_OWNER(x);
646 if (TD_IS_RUNNING(owner)) {
647 sleepq_release(&sx->lock_object);
648 continue;
649 }
650 }
651#endif
652
653 /*
654 * If an exclusive lock was released with both shared
655 * and exclusive waiters and a shared waiter hasn't
656 * woken up and acquired the lock yet, sx_lock will be
657 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
658 * If we see that value, try to acquire it once. Note
659 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
660 * as there are other exclusive waiters still. If we
661 * fail, restart the loop.
662 */
663 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
664 if (atomic_cmpset_acq_ptr(&sx->sx_lock,
665 SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
666 tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
667 sleepq_release(&sx->lock_object);
668 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
669 __func__, sx);
670 break;
671 }
672 sleepq_release(&sx->lock_object);
673 x = SX_READ_VALUE(sx);
674 continue;
675 }
676
677 /*
678 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag. If we fail,
679 * then loop back and retry.
680 */
681 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
682 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
683 x | SX_LOCK_EXCLUSIVE_WAITERS)) {
684 sleepq_release(&sx->lock_object);
685 x = SX_READ_VALUE(sx);
686 continue;
687 }
688 if (LOCK_LOG_TEST(&sx->lock_object, 0))
689 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
690 __func__, sx);
691 }
692
693 /*
694 * Since we have been unable to acquire the exclusive
695 * lock and the exclusive waiters flag is set, we have
696 * to sleep.
697 */
698 if (LOCK_LOG_TEST(&sx->lock_object, 0))
699 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
700 __func__, sx);
701
702#ifdef KDTRACE_HOOKS
703 sleep_time -= lockstat_nsecs(&sx->lock_object);
704#endif
705 GIANT_SAVE();
706 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
707 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
708 SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
709 if (!(opts & SX_INTERRUPTIBLE))
710 sleepq_wait(&sx->lock_object, 0);
711 else
712 error = sleepq_wait_sig(&sx->lock_object, 0);
713#ifdef KDTRACE_HOOKS
714 sleep_time += lockstat_nsecs(&sx->lock_object);
715 sleep_cnt++;
716#endif
717 if (error) {
718 if (LOCK_LOG_TEST(&sx->lock_object, 0))
719 CTR2(KTR_LOCK,
720 "%s: interruptible sleep by %p suspended by signal",
721 __func__, sx);
722 break;
723 }
724 if (LOCK_LOG_TEST(&sx->lock_object, 0))
725 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
726 __func__, sx);
727 x = SX_READ_VALUE(sx);
728 }
729#ifdef KDTRACE_HOOKS
730 all_time += lockstat_nsecs(&sx->lock_object);
731 if (sleep_time)
732 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
733 LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
734 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
735 if (lda.spin_cnt > sleep_cnt)
736 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
737 LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
738 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
739#endif
740 if (!error)
741 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
742 contested, waittime, file, line, LOCKSTAT_WRITER);
743 GIANT_RESTORE();
744 return (error);
745}
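/*
 * The sx__block/sx__spin/sx__acquire records in the hard path above are
 * visible as DTrace probes of the lockstat provider (double underscores
 * become dashes in the probe names); a sketch of inspecting write-lock
 * contention:
 *
 *	# dtrace -n 'lockstat:::sx-block { @[stack()] = count(); }'
 */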
746
747/*
748 * This function represents the so-called 'hard case' for sx_xunlock
749 * operation. All 'easy case' failures are redirected to this. Note
750 * that ideally this would be a static function, but it needs to be
751 * accessible from at least sx.h.
752 */
753void
754_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
755{
756 uintptr_t x;
757 int queue, wakeup_swapper;
758
759 if (SCHEDULER_STOPPED())
760 return;
761
762 MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
763
764 x = SX_READ_VALUE(sx);
765 if (x & SX_LOCK_RECURSED) {
766 /* The lock is recursed, unrecurse one level. */
767 if ((--sx->sx_recurse) == 0)
768 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
769 if (LOCK_LOG_TEST(&sx->lock_object, 0))
770 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
771 return;
772 }
773
774 LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
775 if (x == tid &&
776 atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
777 return;
778
779 MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
780 SX_LOCK_EXCLUSIVE_WAITERS));
781 if (LOCK_LOG_TEST(&sx->lock_object, 0))
782 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
783
784 sleepq_lock(&sx->lock_object);
785 x = SX_LOCK_UNLOCKED;
786
787 /*
788 * The wake up algorithm here is quite simple and probably not
789 * ideal. It gives precedence to shared waiters if they are
790 * present. For this condition, we have to preserve the
791 * state of the exclusive waiters flag.
792 * If interruptible sleeps left the shared queue empty, avoid
793 * starvation of the threads sleeping on the exclusive queue by
794 * giving them precedence and clearing the shared waiters bit anyway.
795 */
796 if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
797 sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
798 queue = SQ_SHARED_QUEUE;
799 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
800 } else
801 queue = SQ_EXCLUSIVE_QUEUE;
802
803 /* Wake up all the waiters for the specific queue. */
804 if (LOCK_LOG_TEST(&sx->lock_object, 0))
805 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
806 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
807 "exclusive");
808 atomic_store_rel_ptr(&sx->sx_lock, x);
809 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
810 queue);
811 sleepq_release(&sx->lock_object);
812 if (wakeup_swapper)
813 kick_proc0();
814}
815
816static bool __always_inline
817__sx_slock_try(struct sx *sx, uintptr_t *xp, const char *file, int line)
818{
819
820 /*
821 * If no other thread has an exclusive lock then try to bump up
822 * the count of sharers. Since we have to preserve the state
823 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
824 * shared lock loop back and retry.
825 */
826 while (*xp & SX_LOCK_SHARED) {
827 MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
828 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
829 *xp + SX_ONE_SHARER)) {
830 if (LOCK_LOG_TEST(&sx->lock_object, 0))
831 CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
832 __func__, sx, (void *)*xp,
833 (void *)(*xp + SX_ONE_SHARER));
834 return (true);
835 }
836 }
837 return (false);
838}
839
840static int __noinline
841_sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
842{
843 GIANT_DECLARE;
844#ifdef ADAPTIVE_SX
845 volatile struct thread *owner;
846#endif
847#ifdef LOCK_PROFILING
848 uint64_t waittime = 0;
849 int contested = 0;
850#endif
851 int error = 0;
852#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
853 struct lock_delay_arg lda;
854#endif
855#ifdef KDTRACE_HOOKS
856 uintptr_t state;
857 u_int sleep_cnt = 0;
858 int64_t sleep_time = 0;
859 int64_t all_time = 0;
860#endif
861
862 if (SCHEDULER_STOPPED())
863 return (0);
864
865#if defined(ADAPTIVE_SX)
866 lock_delay_arg_init(&lda, &sx_delay);
867#elif defined(KDTRACE_HOOKS)
868 lock_delay_arg_init(&lda, NULL);
869#endif
870#ifdef KDTRACE_HOOKS
871 all_time -= lockstat_nsecs(&sx->lock_object);
872 state = x;
873#endif
874
875 /*
876 * As with rwlocks, we make no attempt to block new
877 * shared locks once there is an exclusive waiter.
878 */
879 for (;;) {
880 if (__sx_slock_try(sx, &x, file, line))
881 break;
882#ifdef KDTRACE_HOOKS
883 lda.spin_cnt++;
884#endif
885
886#ifdef HWPMC_HOOKS
887 PMC_SOFT_CALL( , , lock, failed);
888#endif
889 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
890 &waittime);
891
892#ifdef ADAPTIVE_SX
893 /*
894 * If the owner is running on another CPU, spin until
895 * the owner stops running or the state of the lock
896 * changes.
897 */
898 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
899 owner = lv_sx_owner(x);
900 if (TD_IS_RUNNING(owner)) {
901 if (LOCK_LOG_TEST(&sx->lock_object, 0))
902 CTR3(KTR_LOCK,
903 "%s: spinning on %p held by %p",
904 __func__, sx, owner);
905 KTR_STATE1(KTR_SCHED, "thread",
906 sched_tdname(curthread), "spinning",
907 "lockname:\"%s\"", sx->lock_object.lo_name);
908 GIANT_SAVE();
909 do {
910 lock_delay(&lda);
911 x = SX_READ_VALUE(sx);
912 owner = lv_sx_owner(x);
913 } while (owner != NULL && TD_IS_RUNNING(owner));
914 KTR_STATE0(KTR_SCHED, "thread",
915 sched_tdname(curthread), "running");
916 continue;
917 }
918 }
919#endif
920
921 /*
922 * Some other thread already has an exclusive lock, so
923 * start the process of blocking.
924 */
925 sleepq_lock(&sx->lock_object);
926 x = SX_READ_VALUE(sx);
927
928 /*
929 * The lock could have been released while we spun.
930 * In this case loop back and retry.
931 */
932 if (x & SX_LOCK_SHARED) {
933 sleepq_release(&sx->lock_object);
934 continue;
935 }
936
937#ifdef ADAPTIVE_SX
938 /*
939 * If the owner is running on another CPU, spin until
940 * the owner stops running or the state of the lock
941 * changes.
942 */
943 if (!(x & SX_LOCK_SHARED) &&
944 (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
945 owner = (struct thread *)SX_OWNER(x);
946 if (TD_IS_RUNNING(owner)) {
947 sleepq_release(&sx->lock_object);
948 x = SX_READ_VALUE(sx);
949 continue;
950 }
951 }
952#endif
953
954 /*
955 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we
956 * fail to set it drop the sleep queue lock and loop
957 * back.
958 */
959 if (!(x & SX_LOCK_SHARED_WAITERS)) {
960 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
961 x | SX_LOCK_SHARED_WAITERS)) {
962 sleepq_release(&sx->lock_object);
963 x = SX_READ_VALUE(sx);
964 continue;
965 }
966 if (LOCK_LOG_TEST(&sx->lock_object, 0))
967 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
968 __func__, sx);
969 }
970
971 /*
972 * Since we have been unable to acquire the shared lock,
973 * we have to sleep.
974 */
975 if (LOCK_LOG_TEST(&sx->lock_object, 0))
976 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
977 __func__, sx);
978
979#ifdef KDTRACE_HOOKS
980 sleep_time -= lockstat_nsecs(&sx->lock_object);
981#endif
982 GIANT_SAVE();
983 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
984 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
985 SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
986 if (!(opts & SX_INTERRUPTIBLE))
987 sleepq_wait(&sx->lock_object, 0);
988 else
989 error = sleepq_wait_sig(&sx->lock_object, 0);
990#ifdef KDTRACE_HOOKS
991 sleep_time += lockstat_nsecs(&sx->lock_object);
992 sleep_cnt++;
993#endif
994 if (error) {
995 if (LOCK_LOG_TEST(&sx->lock_object, 0))
996 CTR2(KTR_LOCK,
997 "%s: interruptible sleep by %p suspended by signal",
998 __func__, sx);
999 break;
1000 }
1001 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1002 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
1003 __func__, sx);
1004 x = SX_READ_VALUE(sx);
1005 }
1006#ifdef KDTRACE_HOOKS
1007 all_time += lockstat_nsecs(&sx->lock_object);
1008 if (sleep_time)
1009 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
1010 LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1011 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1012 if (lda.spin_cnt > sleep_cnt)
1013 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
1014 LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1015 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1016#endif
1017 if (error == 0) {
1018 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
1019 contested, waittime, file, line, LOCKSTAT_READER);
1020 }
1021 GIANT_RESTORE();
1022 return (error);
1023}
1024
1025int
1026_sx_slock(struct sx *sx, int opts, const char *file, int line)
1027{
1028 uintptr_t x;
1029 int error;
1030
1031 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
1032 !TD_IS_IDLETHREAD(curthread),
1033 ("sx_slock() by idle thread %p on sx %s @ %s:%d",
1034 curthread, sx->lock_object.lo_name, file, line));
1035 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1036 ("sx_slock() of destroyed sx @ %s:%d", file, line));
1037 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
1038
1039 error = 0;
1040 x = SX_READ_VALUE(sx);
1041 if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
1042 !__sx_slock_try(sx, &x, file, line)))
1043 error = _sx_slock_hard(sx, opts, file, line, x);
1044 if (error == 0) {
1045 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
1046 WITNESS_LOCK(&sx->lock_object, 0, file, line);
1047 TD_LOCKS_INC(curthread);
1048 }
1049 return (error);
1050}
1051
1052static bool __always_inline
1053_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
1054{
1055
1056 for (;;) {
1057 /*
1058 * We should never have shared waiters while at least one
1059 * thread holds a shared lock.
1060 */
1061 KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
1062 ("%s: waiting sharers", __func__));
1063
1064 /*
1065 * See if there is more than one shared lock held. If
1066 * so, just drop one and return.
1067 */
1068 if (SX_SHARERS(*xp) > 1) {
1069 if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
1070 *xp - SX_ONE_SHARER)) {
1071 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1072 CTR4(KTR_LOCK,
1073 "%s: %p succeeded %p -> %p",
1074 __func__, sx, (void *)*xp,
1075 (void *)(*xp - SX_ONE_SHARER));
1076 return (true);
1077 }
1078 continue;
1079 }
1080
1081 /*
1082 * If there aren't any waiters for an exclusive lock,
1083 * then try to drop it quickly.
1084 */
1085 if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
1086 MPASS(*xp == SX_SHARERS_LOCK(1));
1087 *xp = SX_SHARERS_LOCK(1);
1088 if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
1089 xp, SX_LOCK_UNLOCKED)) {
1090 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1091 CTR2(KTR_LOCK, "%s: %p last succeeded",
1092 __func__, sx);
1093 return (true);
1094 }
1095 continue;
1096 }
1097 break;
1098 }
1099 return (false);
1100}
1101
1102static void __noinline
1103_sx_sunlock_hard(struct sx *sx, uintptr_t x, const char *file, int line)
1104{
1105 int wakeup_swapper;
1106
1107 if (SCHEDULER_STOPPED())
1108 return;
1109
1110 for (;;) {
1111 if (_sx_sunlock_try(sx, &x))
1112 break;
1113
1114 /*
1115 * At this point, there should just be one sharer with
1116 * exclusive waiters.
1117 */
1118 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
1119
1120 sleepq_lock(&sx->lock_object);
1121
1122 /*
1123 * The wakeup semantic here is quite simple: just wake up all
1124 * the exclusive waiters.
1125 * Note that the state of the lock could have changed, so if
1126 * the cmpset fails, loop back and retry.
1127 */
1128 if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
1129 SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
1130 SX_LOCK_UNLOCKED)) {
1131 sleepq_release(&sx->lock_object);
1132 x = SX_READ_VALUE(sx);
1133 continue;
1134 }
1135 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1136 CTR2(KTR_LOCK, "%s: %p waking up all threads on"
1137 " exclusive queue", __func__, sx);
1138 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
1139 0, SQ_EXCLUSIVE_QUEUE);
1140 sleepq_release(&sx->lock_object);
1141 if (wakeup_swapper)
1142 kick_proc0();
1143 break;
1144 }
1145 LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
1146}
1147
1148void
1149_sx_sunlock(struct sx *sx, const char *file, int line)
1150{
1151 uintptr_t x;
1152
1153 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1154 ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
1155 _sx_assert(sx, SA_SLOCKED, file, line);
1156 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
1157 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
1158
1159 x = SX_READ_VALUE(sx);
1160 if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
1161 !_sx_sunlock_try(sx, &x)))
1162 _sx_sunlock_hard(sx, x, file, line);
1163
1164 TD_LOCKS_DEC(curthread);
1165}
1166
1167#ifdef INVARIANT_SUPPORT
1168#ifndef INVARIANTS
1169#undef _sx_assert
1170#endif
1171
1172/*
1173 * In the non-WITNESS case, sx_assert() can only detect that at least
1174 * *some* thread owns an slock, but it cannot guarantee that *this*
1175 * thread owns an slock.
1176 */
1177void
1178_sx_assert(const struct sx *sx, int what, const char *file, int line)
1179{
1180#ifndef WITNESS
1181 int slocked = 0;
1182#endif
1183
1184 if (panicstr != NULL)
1185 return;
1186 switch (what) {
1187 case SA_SLOCKED:
1188 case SA_SLOCKED | SA_NOTRECURSED:
1189 case SA_SLOCKED | SA_RECURSED:
1190#ifndef WITNESS
1191 slocked = 1;
1192 /* FALLTHROUGH */
1193#endif
1194 case SA_LOCKED:
1195 case SA_LOCKED | SA_NOTRECURSED:
1196 case SA_LOCKED | SA_RECURSED:
1197#ifdef WITNESS
1198 witness_assert(&sx->lock_object, what, file, line);
1199#else
1200 /*
1201 * If some other thread has an exclusive lock or we
1202 * have one and are asserting a shared lock, fail.
1203 * Also, if no one has a lock at all, fail.
1204 */
1205 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1206 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1207 sx_xholder(sx) != curthread)))
1208 panic("Lock %s not %slocked @ %s:%d\n",
1209 sx->lock_object.lo_name, slocked ? "share " : "",
1210 file, line);
1211
1212 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1213 if (sx_recursed(sx)) {
1214 if (what & SA_NOTRECURSED)
1215 panic("Lock %s recursed @ %s:%d\n",
1216 sx->lock_object.lo_name, file,
1217 line);
1218 } else if (what & SA_RECURSED)
1219 panic("Lock %s not recursed @ %s:%d\n",
1220 sx->lock_object.lo_name, file, line);
1221 }
1222#endif
1223 break;
1224 case SA_XLOCKED:
1225 case SA_XLOCKED | SA_NOTRECURSED:
1226 case SA_XLOCKED | SA_RECURSED:
1227 if (sx_xholder(sx) != curthread)
1228 panic("Lock %s not exclusively locked @ %s:%d\n",
1229 sx->lock_object.lo_name, file, line);
1230 if (sx_recursed(sx)) {
1231 if (what & SA_NOTRECURSED)
1232 panic("Lock %s recursed @ %s:%d\n",
1233 sx->lock_object.lo_name, file, line);
1234 } else if (what & SA_RECURSED)
1235 panic("Lock %s not recursed @ %s:%d\n",
1236 sx->lock_object.lo_name, file, line);
1237 break;
1238 case SA_UNLOCKED:
1239#ifdef WITNESS
1240 witness_assert(&sx->lock_object, what, file, line);
1241#else
1242 /*
1243 * If we hold an exclusive lock, fail. We can't
1244 * reliably check to see if we hold a shared lock or
1245 * not.
1246 */
1247 if (sx_xholder(sx) == curthread)
1248 panic("Lock %s exclusively locked @ %s:%d\n",
1249 sx->lock_object.lo_name, file, line);
1250#endif
1251 break;
1252 default:
1253 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
1254 line);
1255 }
1256}
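/*
 * Typical use of the assertion interface checked above (a sketch; the
 * lock name is hypothetical):
 *
 *	sx_assert(&foo_lock, SA_XLOCKED);	(must be held exclusively)
 *	sx_assert(&foo_lock, SA_LOCKED);	(shared or exclusive is fine)
 */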
1257#endif /* INVARIANT_SUPPORT */
1258
1259#ifdef DDB
1260static void
1261db_show_sx(const struct lock_object *lock)
1262{
1263 struct thread *td;
1264 const struct sx *sx;
1265
1266 sx = (const struct sx *)lock;
1267
1268 db_printf(" state: ");
1269 if (sx->sx_lock == SX_LOCK_UNLOCKED)
1270 db_printf("UNLOCKED\n");
1271 else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1272 db_printf("DESTROYED\n");
1273 return;
1274 } else if (sx->sx_lock & SX_LOCK_SHARED)
1275 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1276 else {
1277 td = sx_xholder(sx);
1278 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1279 td->td_tid, td->td_proc->p_pid, td->td_name);
1280 if (sx_recursed(sx))
1281 db_printf(" recursed: %d\n", sx->sx_recurse);
1282 }
1283
1284 db_printf(" waiters: ");
1285 switch (sx->sx_lock &
1286 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
1287 case SX_LOCK_SHARED_WAITERS:
1288 db_printf("shared\n");
1289 break;
1290 case SX_LOCK_EXCLUSIVE_WAITERS:
1291 db_printf("exclusive\n");
1292 break;
1293 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
1294 db_printf("exclusive and shared\n");
1295 break;
1296 default:
1297 db_printf("none\n");
1298 }
1299}
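/*
 * db_show_sx() backs the DDB "show lock" command for sx locks; a sketch
 * of the output it contributes (the address and names are made up):
 *
 *	db> show lock 0xfffff80003d2ca10
 *	...
 *	 state: XLOCK: 0xfffff80003abcd00 (tid 100123, pid 42, "foo_daemon")
 *	 waiters: exclusive
 */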
1300
1301/*
1302 * Check to see if a thread that is blocked on a sleep queue is actually
1303 * blocked on an sx lock. If so, output some details and return true.
1304 * If the lock has an exclusive owner, return that in *ownerp.
1305 */
1306int
1307sx_chain(struct thread *td, struct thread **ownerp)
1308{
1309 struct sx *sx;
1310
1311 /*
1312 * Check to see if this thread is blocked on an sx lock.
1313 * First, we check the lock class. If that is ok, then we
1314 * compare the lock name against the wait message.
1315 */
1316 sx = td->td_wchan;
1317 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1318 sx->lock_object.lo_name != td->td_wmesg)
1319 return (0);
1320
1321 /* We think we have an sx lock, so output some details. */
1322 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1323 *ownerp = sx_xholder(sx);
1324 if (sx->sx_lock & SX_LOCK_SHARED)
1325 db_printf("SLOCK (count %ju)\n",
1326 (uintmax_t)SX_SHARERS(sx->sx_lock));
1327 else
1328 db_printf("XLOCK\n");
1329 return (1);
1330}
1331#endif