kern_lock.c revision 196772
/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 196772 2009-09-02 17:33:51Z attilio $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
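
/*
 * Illustrative summary of the lk_lock encoding (not authoritative; it is
 * simply restated from the LK_* macros used above and below): when LK_SHARE
 * is set the upper bits of lk_lock hold the sharer count (LK_SHARERS()),
 * otherwise they hold the owning thread pointer (LK_HOLDER()), with
 * LK_KERNPROC denoting a disowned lock.  The low flag bits record pending
 * shared/exclusive waiters and exclusive spinners and must be preserved
 * across every atomic transition, e.g.:
 *
 *	x = lk->lk_lock;
 *	v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 *	atomic_cmpset_acq_ptr(&lk->lk_lock, x, tid | v);
 */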

static void	assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is sane
 * and previously checked.  If LK_INTERLOCK is specified, the interlock is
 * not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which flavour of sleep is actually needed.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}
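
/*
 * Typical consumer pattern (an illustrative sketch only; "examplelk" is a
 * made-up name and PVFS is just one plausible priority):
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "examplelk", 0, 0);
 *	...
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&examplelk, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&examplelk);
 *
 * The priority, wait message and timeout recorded here are the defaults used
 * whenever a later request passes LK_PRIO_DEFAULT, LK_WMESG_DEFAULT or
 * LK_TIMO_DEFAULT respectively (see __lockmgr_args() below).
 */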

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  Both states need to be handled here
			 * because, for a failed acquisition, the lock can be
			 * either held in exclusive mode or in shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
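	/*
	 * Note on the LK_UPGRADE case below (summary, not normative): the
	 * upgrade is attempted with a single compare-and-set that turns the
	 * caller's lone shared hold into exclusive ownership while preserving
	 * the waiters flags; if that fails the shared lock is released and
	 * the request falls through to the LK_EXCLUSIVE path, so an upgrade
	 * can block or fail exactly like any other exclusive request.
	 */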
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the caller asked for a try operation,
				 * avoid the panic and just give up and
				 * return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters at all.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS((x & LK_ALL_WAITERS) ==
				    LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
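
/*
 * Interlock hand-off sketch (illustrative only; foo_mtx, foo_needs_lock()
 * and f_lock are hypothetical names): a caller that checks some state under
 * a mutex before acquiring the lock can pass that mutex as the interlock so
 * it is dropped atomically with respect to the sleep:
 *
 *	mtx_lock(&foo_mtx);
 *	if (foo_needs_lock(foo))
 *		lockmgr(&foo->f_lock, LK_EXCLUSIVE | LK_INTERLOCK, &foo_mtx);
 *	else
 *		mtx_unlock(&foo_mtx);
 *
 * With LK_INTERLOCK the interlock is always released by __lockmgr_args() and
 * is never reacquired, whether the request succeeds, fails or sleeps.
 */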

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
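
/*
 * Disowning sketch (illustrative only; obj, o_lock and hand_off() are
 * hypothetical names): a thread that acquires the lock and then hands the
 * protected object to another context, which will eventually release it,
 * can transfer ownership to LK_KERNPROC so that neither WITNESS nor the
 * per-thread lock counts stay charged to the wrong thread:
 *
 *	lockmgr(&obj->o_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&obj->o_lock);
 *	hand_off(obj);		(released later with LK_RELEASE elsewhere)
 */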

void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif