kern_lock.c revision 174948
1/*- 2 * Copyright (c) 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Copyright (C) 1997 6 * John S. Dyson. All rights reserved. 7 * 8 * This code contains ideas from software contributed to Berkeley by 9 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating 10 * System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 174948 2007-12-27 22:56:57Z attilio $");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

static void	assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

/*
 * lock(9) class glue for lockmgr locks.  The assert/lock/unlock hooks
 * all panic below: lockmgr locks do not support lock-class assertions
 * and cannot be used as sleep interlocks.  The DDB show hook is only
 * registered when DDB is compiled in.
 */
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

/* lock-class hook: lockmgr locks do not support lock-class assertions. */
void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

/* lock-class hook: lockmgr locks cannot be used as sleep interlocks. */
void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

/* lock-class hook: lockmgr locks cannot be used as sleep interlocks. */
int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

/*
 * Adjust the per-thread lock count; a NULL td (kernel transfer via
 * LK_KERNPROC) is not counted.
 */
#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
/* Union of every flag that must drain before LK_DRAIN succeeds. */
#define	LK_ALL	(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime);
static int acquiredrain(struct lock *lkp, int extflags) ;

/*
 * Grant one or more shared holds: bump the share count and mark the
 * lock as share-held.  Caller holds the interlock.
 */
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

/*
 * Drop 'decr' shared holds.  When the last shared hold goes away,
 * clear LK_SHARE_NONZERO and wake any pending exclusive/upgrade
 * requestor that is waiting for the share count to reach zero.
 * Caller holds the interlock.
 */
static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * Sleep until none of the 'wanted' flags are set on the lock, or fail
 * immediately with EBUSY under LK_NOWAIT.  Called and returns with the
 * interlock held; msleep() drops and reacquires it around each sleep.
 * If the lock is being transferred (lk_newlock set), chase the new
 * lock and report it back through *lkpp so the caller operates on the
 * replacement.  LK_SLEEPFAIL turns any successful sleep into ENOLCK.
 */
static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested, uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	/* Record contention for lock profiling before we start sleeping. */
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		/* Timeout only applies when LK_TIMELOCK was requested. */
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			/* Lock is being transferred; follow the new lock. */
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
	 struct thread *td, char *file, int line)

{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	/*
	 * Lock owner can only be curthread or, at least, NULL in order to
	 * have a deadlock free implementation of the primitive.
	 */
	KASSERT(td == NULL || td == curthread,
	    ("lockmgr: owner thread (%p) cannot differ from curthread or NULL",
	    td));

	error = 0;
	/* A NULL td means the hold is owned by the kernel (LK_KERNPROC). */
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->lock_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	/* After a panic, every request trivially succeeds. */
	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	/* LK_NOSHARE locks silently promote shared requests to exclusive. */
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests ( but not the exclusive
		 * lock itself ).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested, &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		/* Convert every exclusive recursion level into a shared hold. */
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				/* Hand the lock over to a waiting exclusive requestor. */
				if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested, &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			/* Kernel-owned holds are not counted against any thread. */
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			/* Releasing an unheld lock: complain but do not panic. */
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/*
	 * Wake a drainer (sleeping on &lk_flags, see acquiredrain()) once
	 * the lock has gone fully idle.
	 */
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

/*
 * Sleep until the lock is completely idle (no holder, no requestor, no
 * waiter).  Drainers sleep on &lkp->lk_flags, not on lkp itself, so they
 * are woken separately via the LK_WAITDRAIN check in _lockmgr().  Called
 * with the interlock held.
 */
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
518 */ 519void 520lockinit(lkp, prio, wmesg, timo, flags) 521 struct lock *lkp; 522 int prio; 523 const char *wmesg; 524 int timo; 525 int flags; 526{ 527 CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", " 528 "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags); 529 530 lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder); 531 lkp->lk_flags = (flags & LK_EXTFLG_MASK); 532 lkp->lk_sharecount = 0; 533 lkp->lk_waitcount = 0; 534 lkp->lk_exclusivecount = 0; 535 lkp->lk_prio = prio; 536 lkp->lk_timo = timo; 537 lkp->lk_lockholder = LK_NOPROC; 538 lkp->lk_newlock = NULL; 539#ifdef DEBUG_LOCKS 540 stack_zero(&lkp->lk_stack); 541#endif 542 lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, 543 LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE); 544} 545 546/* 547 * Destroy a lock. 548 */ 549void 550lockdestroy(lkp) 551 struct lock *lkp; 552{ 553 554 CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")", 555 lkp, lkp->lk_wmesg); 556 lock_destroy(&lkp->lk_object); 557} 558 559/* 560 * Determine the status of a lock. 561 */ 562int 563lockstatus(lkp, td) 564 struct lock *lkp; 565 struct thread *td; 566{ 567 int lock_type = 0; 568 int interlocked; 569 570 if (!kdb_active) { 571 interlocked = 1; 572 mtx_lock(lkp->lk_interlock); 573 } else 574 interlocked = 0; 575 if (lkp->lk_exclusivecount != 0) { 576 if (td == NULL || lkp->lk_lockholder == td) 577 lock_type = LK_EXCLUSIVE; 578 else 579 lock_type = LK_EXCLOTHER; 580 } else if (lkp->lk_sharecount != 0) 581 lock_type = LK_SHARED; 582 if (interlocked) 583 mtx_unlock(lkp->lk_interlock); 584 return (lock_type); 585} 586 587/* 588 * Determine the number of holders of a lock. 589 */ 590int 591lockcount(lkp) 592 struct lock *lkp; 593{ 594 int count; 595 596 mtx_lock(lkp->lk_interlock); 597 count = lkp->lk_exclusivecount + lkp->lk_sharecount; 598 mtx_unlock(lkp->lk_interlock); 599 return (count); 600} 601 602/* 603 * Determine the number of waiters on a lock. 
 */
int
lockwaiters(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		/*
		 * NOTE(review): dereferences lk_lockholder->td_proc; this
		 * assumes the exclusive holder is a real thread, not the
		 * LK_KERNPROC sentinel — confirm callers never hit this
		 * path for kernel-owned locks.
		 */
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print_ddb(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'. If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.  Back up from the wait channel
	 * to the start of the containing struct lock and re-test.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

/*
 * DDB "show lock" handler for lockmgr locks: dump type, hold state,
 * holder identity, and waiter count.  Runs without the interlock (the
 * debugger has the machine stopped).
 */
void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf(" lock type: %s\n", lkp->lk_wmesg);
	db_printf(" state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_name);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf(" waiters: %d\n", lkp->lk_waitcount);
}
#endif