subr_witness.c revision 166525
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
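
/*
 * Illustration (a sketch only; "xlock" stands in for any sleepable sx
 * lock, it is not a real lock in this file).  By rule 3) both orders
 * below are legal; in the second one, rule 2) guarantees that Giant is
 * released whenever sx_xlock() must sleep, so no deadlock can result:
 *
 *	sx_xlock(&xlock);	-- sleepable lock first, then Giant
 *	mtx_lock(&Giant);
 *
 *	mtx_lock(&Giant);	-- Giant first, then a sleepable lock;
 *	sx_xlock(&xlock);	-- Giant is dropped if this must sleep
 */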
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 166525 2007-02-06 05:51:55Z mpp $");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

/* Easier to stay with the old names. */
#define	lo_list		lo_witness_data.lod_list
#define	lo_witness	lo_witness_data.lod_witness

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_CHILDCOUNT	((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN	6

struct witness_child_list_entry;

struct witness {
	const	char *w_name;
	struct	lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct	witness_child_list_entry *w_children;	/* Great evilness... */
	const	char *w_file;
	int	w_line;
	u_int	w_level;
	u_int	w_refcount;
	u_char	w_Giant_squawked:1;
	u_char	w_other_squawked:1;
	u_char	w_same_squawked:1;
	u_char	w_displayed:1;
};

struct witness_child_list_entry {
	struct	witness_child_list_entry *wcl_next;
	struct	witness *wcl_children[WITNESS_NCHILDREN];
	u_int	wcl_count;
};
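
/*
 * Note: each witness keeps its direct children in a chain of fixed-size
 * witness_child_list_entry chunks (WITNESS_NCHILDREN pointers apiece)
 * carved out of a static pool, so edges can be added while holding the
 * w_mtx spin lock, where a general-purpose allocator cannot be called.
 */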
STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const	char *b_lock1;
	const	char *b_lock2;
};
#endif

struct witness_order_list_entry {
	const	char *w_name;
	struct	lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
		    struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
		    struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to
 * be turned on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
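
/*
 * Example (illustrative): checking can be turned off, but never back on,
 * from a running system:
 *
 *	# sysctl debug.witness.watch=0		(accepted)
 *	# sysctl debug.witness.watch=1		(rejected with EINVAL)
 *
 * See sysctl_debug_witness_watch() below for the enforcement.
 */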
#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
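
/*
 * Note on the table below: each run of entries up to a { NULL, NULL }
 * terminator declares one known lock order, first-acquired to
 * last-acquired.  witness_initialize() walks each run and links adjacent
 * entries as parent/child in the order tree; a second consecutive
 * { NULL, NULL } ends the whole table.
 */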
static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ "allprison", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_mtx_sleep },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vm page queue free mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.  Witness does not complain about
 * order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of locks initialized prior to witness being initialized whose
 * enrollment is currently deferred.
 */
STAILQ_HEAD(, lock_object) pending_locks =
	STAILQ_HEAD_INITIALIZER(pending_locks);

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine is
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	while (!STAILQ_EMPTY(&pending_locks)) {
		lock = STAILQ_FIRST(&pending_locks);
		STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/*
	 * XXXRW: Why a priv check here?
	 */
	error = priv_check(req->td, PRIV_WITNESS);
	if (error != 0)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch == 0 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
		lock->lo_flags |= LO_ENROLLPEND;
	} else
		lock->lo_witness = enroll(lock->lo_type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);
	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
	    lock->lo_witness != NULL) {
		w = lock->lo_witness;
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	/*
	 * If this lock is destroyed before witness is up and running,
	 * remove it from the pending list.
	 */
	if (lock->lo_flags & LO_ENROLLPEND) {
		STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
		lock->lo_flags &= ~LO_ENROLLPEND;
	}
}
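
/*
 * Note: witness_init() and witness_destroy() above bracket a lock's
 * lifetime.  A lock created with LO_WITNESS before witness_initialize()
 * runs parks on pending_locks (LO_ENROLLPEND) and is enrolled later;
 * destroying it drops the witness refcount, and depart() reclaims the
 * witness once no lock of that name remains.
 */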
#ifdef DDB
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization; technically we could get
		 * away with just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors; display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
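
/*
 * Example for witness_defineorder() below (a sketch; "foo_mtx" and
 * "bar_mtx" are hypothetical locks): a subsystem that knows its ordering
 * up front can declare it instead of waiting for witness to learn it from
 * actual acquisitions:
 *
 *	error = witness_defineorder(&foo_mtx.mtx_object,
 *	    &bar_mtx.mtx_object);
 *
 * EDOOFUS comes back if the reverse order is already known.
 */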
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.mtx_object)
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.mtx_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(__func__);
#endif
}
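
/*
 * Note: a report from the reversal path above looks like the following
 * (pointer and file values are hypothetical; the format comes from the
 * printf calls in witness_checkorder()):
 *
 *	lock order reversal:
 *	 1st 0xc1234567 bar (bar) @ kern/foo.c:42
 *	 2nd 0xc89abcde foo (foo) @ kern/foo.c:99
 */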
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
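
/*
 * Note on the upgrade/downgrade hooks below: only try-upgrades are legal
 * (witness_upgrade() panics otherwise) because a blocking upgrade of a
 * shared lock can deadlock against another shared holder attempting the
 * same upgrade.
 */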
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}
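
/*
 * Example for witness_warn() below (a sketch; "wmesg" stands in for the
 * caller's wait message): a typical caller checks for stray locks before
 * a voluntary sleep, exempting Giant and sleepable locks:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping on \"%s\"", wmesg);
 */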
/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spin lock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		goto out;
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
out:
	/*
	 * We issue a warning for any spin locks not defined in the static
	 * order list as a way to discourage their use (folks should really
	 * be using non-spin mutexes most of the time).  However, several
	 * third-party device drivers use spin locks because that is all
	 * they have available on Windows and Linux and they think that
	 * normal mutexes are insufficient.
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
		printf("WITNESS: spin lock %s not in order list\n",
		    description);
	return (w);
}
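
/*
 * Note: enroll() above keys witnesses by name (pointer equality or
 * strcmp), so every lock initialized with the same lo_type shares one
 * witness and therefore one node in the order tree.
 */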
1478 */ 1479 STAILQ_FOREACH(parent, list, w_typelist) { 1480 if (!isitmychild(parent, w)) 1481 continue; 1482 removechild(parent, w); 1483 } 1484 1485 /* 1486 * Now we go through and free up the child list of the 1487 * outgoing witness. 1488 */ 1489 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) { 1490 nwcl = wcl->wcl_next; 1491 w_child_cnt--; 1492 witness_child_free(wcl); 1493 } 1494 1495 /* 1496 * Detach from various lists and free. 1497 */ 1498 STAILQ_REMOVE(list, w, witness, w_typelist); 1499 STAILQ_REMOVE(&w_all, w, witness, w_list); 1500 witness_free(w); 1501 1502 return (1); 1503} 1504 1505/* 1506 * Add "child" as a direct child of "parent". Returns false if 1507 * we fail due to out of memory. 1508 */ 1509static int 1510insertchild(struct witness *parent, struct witness *child) 1511{ 1512 struct witness_child_list_entry **wcl; 1513 1514 MPASS(child != NULL && parent != NULL); 1515 1516 /* 1517 * Insert "child" after "parent" 1518 */ 1519 wcl = &parent->w_children; 1520 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN) 1521 wcl = &(*wcl)->wcl_next; 1522 if (*wcl == NULL) { 1523 *wcl = witness_child_get(); 1524 if (*wcl == NULL) 1525 return (0); 1526 w_child_cnt++; 1527 } 1528 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child; 1529 1530 return (1); 1531} 1532 1533 1534static int 1535itismychild(struct witness *parent, struct witness *child) 1536{ 1537 struct witness_list *list; 1538 1539 MPASS(child != NULL && parent != NULL); 1540 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) != 1541 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK))) 1542 panic( 1543 "%s: parent (%s) and child (%s) are not the same lock type", 1544 __func__, parent->w_class->lc_name, 1545 child->w_class->lc_name); 1546 1547 if (!insertchild(parent, child)) 1548 return (0); 1549 1550 if (parent->w_class->lc_flags & LC_SLEEPLOCK) 1551 list = &w_sleep; 1552 else 1553 list = &w_spin; 1554 return (1); 1555} 1556 1557static void 1558removechild(struct witness *parent, struct witness *child) 1559{ 1560 struct witness_child_list_entry **wcl, *wcl1; 1561 int i; 1562 1563 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next) 1564 for (i = 0; i < (*wcl)->wcl_count; i++) 1565 if ((*wcl)->wcl_children[i] == child) 1566 goto found; 1567 return; 1568found: 1569 (*wcl)->wcl_count--; 1570 if ((*wcl)->wcl_count > i) 1571 (*wcl)->wcl_children[i] = 1572 (*wcl)->wcl_children[(*wcl)->wcl_count]; 1573 MPASS((*wcl)->wcl_children[i] != NULL); 1574 if ((*wcl)->wcl_count != 0) 1575 return; 1576 wcl1 = *wcl; 1577 *wcl = wcl1->wcl_next; 1578 w_child_cnt--; 1579 witness_child_free(wcl1); 1580} 1581 1582static int 1583isitmychild(struct witness *parent, struct witness *child) 1584{ 1585 struct witness_child_list_entry *wcl; 1586 int i; 1587 1588 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1589 for (i = 0; i < wcl->wcl_count; i++) { 1590 if (wcl->wcl_children[i] == child) 1591 return (1); 1592 } 1593 } 1594 return (0); 1595} 1596 1597static int 1598isitmydescendant(struct witness *parent, struct witness *child) 1599{ 1600 struct witness_child_list_entry *wcl; 1601 int i, j; 1602 1603 if (isitmychild(parent, child)) 1604 return (1); 1605 j = 0; 1606 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1607 MPASS(j < 1000); 1608 for (i = 0; i < wcl->wcl_count; i++) { 1609 if (isitmydescendant(wcl->wcl_children[i], child)) 1610 return (1); 1611 } 1612 j++; 1613 } 1614 return (0); 1615} 1616 1617#ifdef BLESSING 1618static int 1619blessed(struct witness 
*w1, struct witness *w2) 1620{ 1621 int i; 1622 struct witness_blessed *b; 1623 1624 for (i = 0; i < blessed_count; i++) { 1625 b = &blessed_list[i]; 1626 if (strcmp(w1->w_name, b->b_lock1) == 0) { 1627 if (strcmp(w2->w_name, b->b_lock2) == 0) 1628 return (1); 1629 continue; 1630 } 1631 if (strcmp(w1->w_name, b->b_lock2) == 0) 1632 if (strcmp(w2->w_name, b->b_lock1) == 0) 1633 return (1); 1634 } 1635 return (0); 1636} 1637#endif 1638 1639static struct witness * 1640witness_get(void) 1641{ 1642 struct witness *w; 1643 1644 if (witness_watch == 0) { 1645 mtx_unlock_spin(&w_mtx); 1646 return (NULL); 1647 } 1648 if (STAILQ_EMPTY(&w_free)) { 1649 witness_watch = 0; 1650 mtx_unlock_spin(&w_mtx); 1651 printf("%s: witness exhausted\n", __func__); 1652 return (NULL); 1653 } 1654 w = STAILQ_FIRST(&w_free); 1655 STAILQ_REMOVE_HEAD(&w_free, w_list); 1656 w_free_cnt--; 1657 bzero(w, sizeof(*w)); 1658 return (w); 1659} 1660 1661static void 1662witness_free(struct witness *w) 1663{ 1664 1665 STAILQ_INSERT_HEAD(&w_free, w, w_list); 1666 w_free_cnt++; 1667} 1668 1669static struct witness_child_list_entry * 1670witness_child_get(void) 1671{ 1672 struct witness_child_list_entry *wcl; 1673 1674 if (witness_watch == 0) { 1675 mtx_unlock_spin(&w_mtx); 1676 return (NULL); 1677 } 1678 wcl = w_child_free; 1679 if (wcl == NULL) { 1680 witness_watch = 0; 1681 mtx_unlock_spin(&w_mtx); 1682 printf("%s: witness exhausted\n", __func__); 1683 return (NULL); 1684 } 1685 w_child_free = wcl->wcl_next; 1686 w_child_free_cnt--; 1687 bzero(wcl, sizeof(*wcl)); 1688 return (wcl); 1689} 1690 1691static void 1692witness_child_free(struct witness_child_list_entry *wcl) 1693{ 1694 1695 wcl->wcl_next = w_child_free; 1696 w_child_free = wcl; 1697 w_child_free_cnt++; 1698} 1699 1700static struct lock_list_entry * 1701witness_lock_list_get(void) 1702{ 1703 struct lock_list_entry *lle; 1704 1705 if (witness_watch == 0) 1706 return (NULL); 1707 mtx_lock_spin(&w_mtx); 1708 lle = w_lock_list_free; 1709 if (lle == NULL) { 1710 witness_watch = 0; 1711 mtx_unlock_spin(&w_mtx); 1712 printf("%s: witness exhausted\n", __func__); 1713 return (NULL); 1714 } 1715 w_lock_list_free = lle->ll_next; 1716 mtx_unlock_spin(&w_mtx); 1717 bzero(lle, sizeof(*lle)); 1718 return (lle); 1719} 1720 1721static void 1722witness_lock_list_free(struct lock_list_entry *lle) 1723{ 1724 1725 mtx_lock_spin(&w_mtx); 1726 lle->ll_next = w_lock_list_free; 1727 w_lock_list_free = lle; 1728 mtx_unlock_spin(&w_mtx); 1729} 1730 1731static struct lock_instance * 1732find_instance(struct lock_list_entry *lock_list, struct lock_object *lock) 1733{ 1734 struct lock_list_entry *lle; 1735 struct lock_instance *instance; 1736 int i; 1737 1738 for (lle = lock_list; lle != NULL; lle = lle->ll_next) 1739 for (i = lle->ll_count - 1; i >= 0; i--) { 1740 instance = &lle->ll_children[i]; 1741 if (instance->li_lock == lock) 1742 return (instance); 1743 } 1744 return (NULL); 1745} 1746 1747static void 1748witness_list_lock(struct lock_instance *instance) 1749{ 1750 struct lock_object *lock; 1751 1752 lock = instance->li_lock; 1753 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ? 
1754 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name); 1755 if (lock->lo_type != lock->lo_name) 1756 printf(" (%s)", lock->lo_type); 1757 printf(" r = %d (%p) locked @ %s:%d\n", 1758 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file, 1759 instance->li_line); 1760} 1761 1762#ifdef DDB 1763static int 1764witness_thread_has_locks(struct thread *td) 1765{ 1766 1767 return (td->td_sleeplocks != NULL); 1768} 1769 1770static int 1771witness_proc_has_locks(struct proc *p) 1772{ 1773 struct thread *td; 1774 1775 FOREACH_THREAD_IN_PROC(p, td) { 1776 if (witness_thread_has_locks(td)) 1777 return (1); 1778 } 1779 return (0); 1780} 1781#endif 1782 1783int 1784witness_list_locks(struct lock_list_entry **lock_list) 1785{ 1786 struct lock_list_entry *lle; 1787 int i, nheld; 1788 1789 nheld = 0; 1790 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 1791 for (i = lle->ll_count - 1; i >= 0; i--) { 1792 witness_list_lock(&lle->ll_children[i]); 1793 nheld++; 1794 } 1795 return (nheld); 1796} 1797 1798/* 1799 * This is a bit risky at best. We call this function when we have timed 1800 * out acquiring a spin lock, and we assume that the other CPU is stuck 1801 * with this lock held. So, we go groveling around in the other CPU's 1802 * per-cpu data to try to find the lock instance for this spin lock to 1803 * see when it was last acquired. 1804 */ 1805void 1806witness_display_spinlock(struct lock_object *lock, struct thread *owner) 1807{ 1808 struct lock_instance *instance; 1809 struct pcpu *pc; 1810 1811 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 1812 return; 1813 pc = pcpu_find(owner->td_oncpu); 1814 instance = find_instance(pc->pc_spinlocks, lock); 1815 if (instance != NULL) 1816 witness_list_lock(instance); 1817} 1818 1819void 1820witness_save(struct lock_object *lock, const char **filep, int *linep) 1821{ 1822 struct lock_list_entry *lock_list; 1823 struct lock_instance *instance; 1824 struct lock_class *class; 1825 1826 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1827 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1828 return; 1829 class = LOCK_CLASS(lock); 1830 if (class->lc_flags & LC_SLEEPLOCK) 1831 lock_list = curthread->td_sleeplocks; 1832 else { 1833 if (witness_skipspin) 1834 return; 1835 lock_list = PCPU_GET(spinlocks); 1836 } 1837 instance = find_instance(lock_list, lock); 1838 if (instance == NULL) 1839 panic("%s: lock (%s) %s not locked", __func__, 1840 class->lc_name, lock->lo_name); 1841 *filep = instance->li_file; 1842 *linep = instance->li_line; 1843} 1844 1845void 1846witness_restore(struct lock_object *lock, const char *file, int line) 1847{ 1848 struct lock_list_entry *lock_list; 1849 struct lock_instance *instance; 1850 struct lock_class *class; 1851 1852 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1853 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1854 return; 1855 class = LOCK_CLASS(lock); 1856 if (class->lc_flags & LC_SLEEPLOCK) 1857 lock_list = curthread->td_sleeplocks; 1858 else { 1859 if (witness_skipspin) 1860 return; 1861 lock_list = PCPU_GET(spinlocks); 1862 } 1863 instance = find_instance(lock_list, lock); 1864 if (instance == NULL) 1865 panic("%s: lock (%s) %s not locked", __func__, 1866 class->lc_name, lock->lo_name); 1867 lock->lo_witness->w_file = file; 1868 lock->lo_witness->w_line = line; 1869 instance->li_file = file; 1870 instance->li_line = line; 1871} 1872 1873void 1874witness_assert(struct lock_object *lock, int flags, const char *file, int line) 
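
/*
 * Example for witness_assert() below (a sketch; "lock" is any struct
 * lock_object * the caller holds): the LA_* flags compose, so a caller
 * can demand an exclusive, non-recursed hold with:
 *
 *	witness_assert(lock, LA_XLOCKED | LA_NOTRECURSED,
 *	    __FILE__, __LINE__);
 */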
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);

	}
#endif	/* INVARIANT_SUPPORT */
}

#ifdef DDB
static void
witness_list(struct thread *td)
{

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}
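
/*
 * Note: the commands below are reached from the DDB prompt as
 * "show locks", "show alllocks", and "show witness"; the last one dumps
 * the whole lock order tree via witness_display().
 */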
DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_list(td);
}

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_list(td);
		}
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif