/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
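/*
 * Example usage of the sx(9) API implemented below (an illustrative
 * sketch only; "foo_lock" and the foo_*() routines are hypothetical):
 *
 *	struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo data");
 *
 *	sx_slock(&foo_lock);		Readers may run concurrently.
 *	foo_lookup();
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		Writers run exclusively and may sleep.
 *	foo_modify();
 *	sx_xunlock(&foo_lock);
 */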
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_sx.c 278650 2015-02-13 00:29:57Z sbruno $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
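/*
 * The Giant macros above are used throughout the hard-case paths in the
 * pattern sketched below (an illustrative sketch only, not code from
 * this file):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();			Fully drop a possibly recursed Giant.
 *	sleepq_wait(...);		Sleep (or spin) without Giant held.
 *	...
 *	GIANT_RESTORE();		Reacquire Giant to its old depth.
 */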
/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recurse		lock_object.lo_data
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static u_int asx_retries = 10;
static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx = (const struct sx *)lock;
	uintptr_t x = sx->sx_lock;

	*owner = (struct thread *)SX_OWNER(x);
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}
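/*
 * Example initialization (an illustrative sketch only; "foo_lock" is
 * hypothetical):
 *
 *	static struct sx foo_lock;
 *
 *	Dynamic initialization, e.g. from a module load handler:
 *		sx_init_flags(&foo_lock, "foo lock", SX_RECURSE);
 *		...
 *		sx_destroy(&foo_lock);
 *
 *	Static initialization at boot, which runs sx_sysinit() above:
 *		SX_SYSINIT(foo, &foo_lock, "foo lock");
 */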
int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
	error = __sx_slock(sx, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		curthread->td_locks++;
	}

	return (error);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	error = __sx_xlock(sx, curthread, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		curthread->td_locks++;
	}

	return (error);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) &&
	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}

	return (rval);
}
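/*
 * Example try-lock pattern (an illustrative sketch only; "foo_lock" and
 * foo_slow() are hypothetical):
 *
 *	if (sx_try_xlock(&foo_lock)) {
 *		...			Fast path: got the lock without
 *		sx_xunlock(&foo_lock);	sleeping.
 *	} else
 *		foo_slow();		Defer, or retry with sx_xlock().
 */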
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
	__sx_sunlock(sx, file, line);
	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
	curthread->td_locks--;
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
	if (!sx_recursed(sx))
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
	__sx_xunlock(sx, curthread, file, line);
	curthread->td_locks--;
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
	}
	return (success);
}
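/*
 * Example upgrade pattern (an illustrative sketch only; "foo_lock" and
 * foo_needs_update() are hypothetical).  If the upgrade fails, the lock
 * must be dropped and reacquired exclusively, and any state read under
 * the shared lock must be revalidated:
 *
 *	sx_slock(&foo_lock);
 *	if (foo_needs_update() && !sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		...			Revalidate the state read above.
 *	}
 */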
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wake up any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);

	if (wakeup_swapper)
		kick_proc0();
}
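/*
 * Example downgrade pattern (an illustrative sketch only; "foo_lock" is
 * hypothetical).  Downgrading lets other readers in once the exclusive
 * phase of the work is done:
 *
 *	sx_xlock(&foo_lock);
 *	...			Modify the data exclusively.
 *	sx_downgrade(&foo_lock);
 *	...			Keep reading it, now concurrently
 *	sx_sunlock(&foo_lock);	with other readers.
 */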
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

	/* If we already hold an exclusive lock, then recurse. */
	if (sx_xlocked(sx)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		x = sx->sx_lock;
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				x = SX_OWNER(x);
				owner = (struct thread *)x;
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE();
					while (SX_OWNER(sx->sx_lock) == x &&
					    TD_IS_RUNNING(owner)) {
						cpu_spinwait();
#ifdef KDTRACE_HOOKS
						spin_cnt++;
#endif
					}
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					x = sx->sx_lock;
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * since there may be other exclusive waiters still.
		 * If we fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			continue;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}

	GIANT_RESTORE();
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
	return (error);
}
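/*
 * Example of an interruptible acquisition (an illustrative sketch only;
 * "foo_lock" is hypothetical).  sx_xlock_sig() passes SX_INTERRUPTIBLE
 * down to the hard case above, so the sleep can be aborted by a signal:
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);		Typically EINTR or ERESTART.
 *	...
 *	sx_xunlock(&foo_lock);
 */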
/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starving the threads sleeping on the exclusive queue by giving
	 * them precedence and cleaning up the shared waiters bit anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}
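/*
 * For reference, the sx_lock word manipulated directly by the hard-case
 * paths in this file encodes the lock state roughly as sketched below
 * (see sys/sx.h for the authoritative definitions):
 *
 *	SX_LOCK_UNLOCKED		No owner and no sharers.
 *	(uintptr_t)td | flags		Exclusively owned by thread td.
 *	SX_SHARERS_LOCK(n) | flags	Held shared by n threads.
 *
 * where "flags" is drawn from the low bits SX_LOCK_SHARED,
 * SX_LOCK_SHARED_WAITERS, SX_LOCK_EXCLUSIVE_WAITERS and SX_LOCK_RECURSED.
 */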
/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;
	int error = 0;
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		x = sx->sx_lock;

		/*
		 * If no other thread has an exclusive lock then try to bump up
		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner)) {
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
					cpu_spinwait();
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
	if (error == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_SX_SLOCK_BLOCK, sx, sleep_time);
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_SX_SLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
	GIANT_RESTORE();
	return (error);
}
/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		x = sx->sx_lock;

		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock.
		 */
		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)x,
					    (void *)(x - SX_ONE_SHARER));
				break;
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(x == SX_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				break;
			}
			continue;
		}

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * Wake up semantic here is quite simple:
		 * Just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
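/*
 * Example assertion usage (an illustrative sketch only; foo_modify() and
 * "foo_lock" are hypothetical).  Subsystems typically assert the expected
 * lock state at the top of functions that require it:
 *
 *	static void
 *	foo_modify(void)
 *	{
 *
 *		sx_assert(&foo_lock, SA_XLOCKED);
 *		...
 *	}
 */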
"share " : "", 1110 file, line); 1111 1112 if (!(sx->sx_lock & SX_LOCK_SHARED)) { 1113 if (sx_recursed(sx)) { 1114 if (what & SA_NOTRECURSED) 1115 panic("Lock %s recursed @ %s:%d\n", 1116 sx->lock_object.lo_name, file, 1117 line); 1118 } else if (what & SA_RECURSED) 1119 panic("Lock %s not recursed @ %s:%d\n", 1120 sx->lock_object.lo_name, file, line); 1121 } 1122#endif 1123 break; 1124 case SA_XLOCKED: 1125 case SA_XLOCKED | SA_NOTRECURSED: 1126 case SA_XLOCKED | SA_RECURSED: 1127 if (sx_xholder(sx) != curthread) 1128 panic("Lock %s not exclusively locked @ %s:%d\n", 1129 sx->lock_object.lo_name, file, line); 1130 if (sx_recursed(sx)) { 1131 if (what & SA_NOTRECURSED) 1132 panic("Lock %s recursed @ %s:%d\n", 1133 sx->lock_object.lo_name, file, line); 1134 } else if (what & SA_RECURSED) 1135 panic("Lock %s not recursed @ %s:%d\n", 1136 sx->lock_object.lo_name, file, line); 1137 break; 1138 case SA_UNLOCKED: 1139#ifdef WITNESS 1140 witness_assert(&sx->lock_object, what, file, line); 1141#else 1142 /* 1143 * If we hold an exclusve lock fail. We can't 1144 * reliably check to see if we hold a shared lock or 1145 * not. 1146 */ 1147 if (sx_xholder(sx) == curthread) 1148 panic("Lock %s exclusively locked @ %s:%d\n", 1149 sx->lock_object.lo_name, file, line); 1150#endif 1151 break; 1152 default: 1153 panic("Unknown sx lock assertion: %d @ %s:%d", what, file, 1154 line); 1155 } 1156} 1157#endif /* INVARIANT_SUPPORT */ 1158 1159#ifdef DDB 1160static void 1161db_show_sx(const struct lock_object *lock) 1162{ 1163 struct thread *td; 1164 const struct sx *sx; 1165 1166 sx = (const struct sx *)lock; 1167 1168 db_printf(" state: "); 1169 if (sx->sx_lock == SX_LOCK_UNLOCKED) 1170 db_printf("UNLOCKED\n"); 1171 else if (sx->sx_lock == SX_LOCK_DESTROYED) { 1172 db_printf("DESTROYED\n"); 1173 return; 1174 } else if (sx->sx_lock & SX_LOCK_SHARED) 1175 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); 1176 else { 1177 td = sx_xholder(sx); 1178 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1179 td->td_tid, td->td_proc->p_pid, td->td_name); 1180 if (sx_recursed(sx)) 1181 db_printf(" recursed: %d\n", sx->sx_recurse); 1182 } 1183 1184 db_printf(" waiters: "); 1185 switch(sx->sx_lock & 1186 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { 1187 case SX_LOCK_SHARED_WAITERS: 1188 db_printf("shared\n"); 1189 break; 1190 case SX_LOCK_EXCLUSIVE_WAITERS: 1191 db_printf("exclusive\n"); 1192 break; 1193 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: 1194 db_printf("exclusive and shared\n"); 1195 break; 1196 default: 1197 db_printf("none\n"); 1198 } 1199} 1200 1201/* 1202 * Check to see if a thread that is blocked on a sleep queue is actually 1203 * blocked on an sx lock. If so, output some details and return true. 1204 * If the lock has an exclusive owner, return that in *ownerp. 1205 */ 1206int 1207sx_chain(struct thread *td, struct thread **ownerp) 1208{ 1209 struct sx *sx; 1210 1211 /* 1212 * Check to see if this thread is blocked on an sx lock. 1213 * First, we check the lock class. If that is ok, then we 1214 * compare the lock name against the wait message. 1215 */ 1216 sx = td->td_wchan; 1217 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || 1218 sx->lock_object.lo_name != td->td_wmesg) 1219 return (0); 1220 1221 /* We think we have an sx lock, so output some details. 
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif