subr_witness.c revision 149441
1/*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 */ 31 32/* 33 * Implementation of the `witness' lock verifier. Originally implemented for 34 * mutexes in BSD/OS. Extended to handle generic lock objects and lock 35 * classes in FreeBSD. 36 */ 37 38/* 39 * Main Entry: witness 40 * Pronunciation: 'wit-n&s 41 * Function: noun 42 * Etymology: Middle English witnesse, from Old English witnes knowledge, 43 * testimony, witness, from 2wit 44 * Date: before 12th century 45 * 1 : attestation of a fact or event : TESTIMONY 46 * 2 : one that gives evidence; specifically : one who testifies in 47 * a cause or before a judicial tribunal 48 * 3 : one asked to be present at a transaction so as to be able to 49 * testify to its having taken place 50 * 4 : one who has personal knowledge of something 51 * 5 a : something serving as evidence or proof : SIGN 52 * b : public affirmation by word or example of usually 53 * religious faith or conviction <the heroic witness to divine 54 * life -- Pilot> 55 * 6 capitalized : a member of the Jehovah's Witnesses 56 */ 57 58/* 59 * Special rules concerning Giant and lock orders: 60 * 61 * 1) Giant must be acquired before any other mutexes. Stated another way, 62 * no other mutex may be held when Giant is acquired. 63 * 64 * 2) Giant must be released when blocking on a sleepable lock. 65 * 66 * This rule is less obvious, but is a result of Giant providing the same 67 * semantics as spl(). Basically, when a thread sleeps, it must release 68 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule 69 * 2). 70 * 71 * 3) Giant may be acquired before or after sleepable locks. 72 * 73 * This rule is also not quite as obvious. Giant may be acquired after 74 * a sleepable lock because it is a non-sleepable lock and non-sleepable 75 * locks may always be acquired while holding a sleepable lock. 
The second 76 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose 77 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1 78 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and 79 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to 80 * execute. Thus, acquiring Giant both before and after a sleepable lock 81 * will not result in a lock order reversal. 82 */ 83 84#include <sys/cdefs.h> 85__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 149441 2005-08-25 03:47:37Z truckman $"); 86 87#include "opt_ddb.h" 88#include "opt_witness.h" 89 90#include <sys/param.h> 91#include <sys/bus.h> 92#include <sys/kdb.h> 93#include <sys/kernel.h> 94#include <sys/ktr.h> 95#include <sys/lock.h> 96#include <sys/malloc.h> 97#include <sys/mutex.h> 98#include <sys/proc.h> 99#include <sys/sysctl.h> 100#include <sys/systm.h> 101 102#include <ddb/ddb.h> 103 104#include <machine/stdarg.h> 105 106/* Define this to check for blessed mutexes */ 107#undef BLESSING 108 109#define WITNESS_COUNT 1024 110#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4) 111/* 112 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads 113 * will hold LOCK_NCHILDREN * 2 locks. We handle failure ok, and we should 114 * probably be safe for the most part, but it's still a SWAG. 115 */ 116#define LOCK_CHILDCOUNT (MAXCPU + 1024) * 2 117 118#define WITNESS_NCHILDREN 6 119 120struct witness_child_list_entry; 121 122struct witness { 123 const char *w_name; 124 struct lock_class *w_class; 125 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */ 126 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */ 127 struct witness_child_list_entry *w_children; /* Great evilness... */ 128 const char *w_file; 129 int w_line; 130 u_int w_level; 131 u_int w_refcount; 132 u_char w_Giant_squawked:1; 133 u_char w_other_squawked:1; 134 u_char w_same_squawked:1; 135 u_char w_displayed:1; 136}; 137 138struct witness_child_list_entry { 139 struct witness_child_list_entry *wcl_next; 140 struct witness *wcl_children[WITNESS_NCHILDREN]; 141 u_int wcl_count; 142}; 143 144STAILQ_HEAD(witness_list, witness); 145 146#ifdef BLESSING 147struct witness_blessed { 148 const char *b_lock1; 149 const char *b_lock2; 150}; 151#endif 152 153struct witness_order_list_entry { 154 const char *w_name; 155 struct lock_class *w_class; 156}; 157 158#ifdef BLESSING 159static int blessed(struct witness *, struct witness *); 160#endif 161static int depart(struct witness *w); 162static struct witness *enroll(const char *description, 163 struct lock_class *lock_class); 164static int insertchild(struct witness *parent, struct witness *child); 165static int isitmychild(struct witness *parent, struct witness *child); 166static int isitmydescendant(struct witness *parent, struct witness *child); 167static int itismychild(struct witness *parent, struct witness *child); 168static void removechild(struct witness *parent, struct witness *child); 169static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS); 170static void witness_displaydescendants(void(*)(const char *fmt, ...), 171 struct witness *, int indent); 172static const char *fixup_filename(const char *file); 173static void witness_leveldescendents(struct witness *parent, int level); 174static void witness_levelall(void); 175static struct witness *witness_get(void); 176static void witness_free(struct witness *m); 177static struct witness_child_list_entry *witness_child_get(void); 178static void witness_child_free(struct 
witness_child_list_entry *wcl); 179static struct lock_list_entry *witness_lock_list_get(void); 180static void witness_lock_list_free(struct lock_list_entry *lle); 181static struct lock_instance *find_instance(struct lock_list_entry *lock_list, 182 struct lock_object *lock); 183static void witness_list_lock(struct lock_instance *instance); 184#ifdef DDB 185static void witness_list(struct thread *td); 186static void witness_display_list(void(*prnt)(const char *fmt, ...), 187 struct witness_list *list); 188static void witness_display(void(*)(const char *fmt, ...)); 189#endif 190 191SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking"); 192 193/* 194 * If set to 0, witness is disabled. If set to a non-zero value, witness 195 * performs full lock order checking for all locks. At runtime, this 196 * value may be set to 0 to turn off witness. witness is not allowed to be 197 * turned back on once it has been turned off, however. 198 */ 199static int witness_watch = 1; 200TUNABLE_INT("debug.witness.watch", &witness_watch); 201SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0, 202 sysctl_debug_witness_watch, "I", "witness is watching lock operations"); 203 204#ifdef KDB 205/* 206 * When KDB is enabled and witness_kdb is set to 1, it will cause the system 207 * to drop into the kernel debugger when: 208 * - a lock hierarchy violation occurs 209 * - locks are held when going to sleep. 210 */ 211#ifdef WITNESS_KDB 212int witness_kdb = 1; 213#else 214int witness_kdb = 0; 215#endif 216TUNABLE_INT("debug.witness.kdb", &witness_kdb); 217SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, ""); 218 219/* 220 * When KDB is enabled and witness_trace is set to 1, it will cause the system 221 * to print a stack trace when: 222 * - a lock hierarchy violation occurs 223 * - locks are held when going to sleep.
224 */ 225int witness_trace = 1; 226TUNABLE_INT("debug.witness.trace", &witness_trace); 227SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, ""); 228#endif /* KDB */ 229 230#ifdef WITNESS_SKIPSPIN 231int witness_skipspin = 1; 232#else 233int witness_skipspin = 0; 234#endif 235TUNABLE_INT("debug.witness.skipspin", &witness_skipspin); 236SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, 237 &witness_skipspin, 0, ""); 238 239static struct mtx w_mtx; 240static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free); 241static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all); 242static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin); 243static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep); 244static struct witness_child_list_entry *w_child_free = NULL; 245static struct lock_list_entry *w_lock_list_free = NULL; 246 247static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt; 248SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, ""); 249SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, ""); 250SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0, 251 ""); 252SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD, 253 &w_child_free_cnt, 0, ""); 254SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0, 255 ""); 256 257static struct witness w_data[WITNESS_COUNT]; 258static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT]; 259static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT]; 260 261static struct witness_order_list_entry order_lists[] = { 262 { "proctree", &lock_class_sx }, 263 { "allproc", &lock_class_sx }, 264 { "Giant", &lock_class_mtx_sleep }, 265 { "filedesc structure", &lock_class_mtx_sleep }, 266 { "pipe mutex", &lock_class_mtx_sleep }, 267 { "sigio lock", &lock_class_mtx_sleep }, 268 { "process group", &lock_class_mtx_sleep }, 269 { "process lock", &lock_class_mtx_sleep }, 270 { "session", &lock_class_mtx_sleep }, 271 { "uidinfo hash", &lock_class_mtx_sleep }, 272 { "uidinfo struct", &lock_class_mtx_sleep }, 273 { "allprison", &lock_class_mtx_sleep }, 274 { NULL, NULL }, 275 /* 276 * Sockets 277 */ 278 { "filedesc structure", &lock_class_mtx_sleep }, 279 { "accept", &lock_class_mtx_sleep }, 280 { "so_snd", &lock_class_mtx_sleep }, 281 { "so_rcv", &lock_class_mtx_sleep }, 282 { "sellck", &lock_class_mtx_sleep }, 283 { NULL, NULL }, 284 /* 285 * Routing 286 */ 287 { "so_rcv", &lock_class_mtx_sleep }, 288 { "radix node head", &lock_class_mtx_sleep }, 289 { "rtentry", &lock_class_mtx_sleep }, 290 { "ifaddr", &lock_class_mtx_sleep }, 291 { NULL, NULL }, 292 /* 293 * Multicast - protocol locks before interface locks, after UDP locks. 
294 */ 295 { "udpinp", &lock_class_mtx_sleep }, 296 { "in_multi_mtx", &lock_class_mtx_sleep }, 297 { "igmp_mtx", &lock_class_mtx_sleep }, 298 { "if_addr_mtx", &lock_class_mtx_sleep }, 299 { NULL, NULL }, 300 /* 301 * UNIX Domain Sockets 302 */ 303 { "unp", &lock_class_mtx_sleep }, 304 { "so_snd", &lock_class_mtx_sleep }, 305 { NULL, NULL }, 306 /* 307 * UDP/IP 308 */ 309 { "udp", &lock_class_mtx_sleep }, 310 { "udpinp", &lock_class_mtx_sleep }, 311 { "so_snd", &lock_class_mtx_sleep }, 312 { NULL, NULL }, 313 /* 314 * TCP/IP 315 */ 316 { "tcp", &lock_class_mtx_sleep }, 317 { "tcpinp", &lock_class_mtx_sleep }, 318 { "so_snd", &lock_class_mtx_sleep }, 319 { NULL, NULL }, 320 /* 321 * SLIP 322 */ 323 { "slip_mtx", &lock_class_mtx_sleep }, 324 { "slip sc_mtx", &lock_class_mtx_sleep }, 325 { NULL, NULL }, 326 /* 327 * netatalk 328 */ 329 { "ddp_list_mtx", &lock_class_mtx_sleep }, 330 { "ddp_mtx", &lock_class_mtx_sleep }, 331 { NULL, NULL }, 332 /* 333 * BPF 334 */ 335 { "bpf global lock", &lock_class_mtx_sleep }, 336 { "bpf interface lock", &lock_class_mtx_sleep }, 337 { "bpf cdev lock", &lock_class_mtx_sleep }, 338 { NULL, NULL }, 339 /* 340 * NFS server 341 */ 342 { "nfsd_mtx", &lock_class_mtx_sleep }, 343 { "so_snd", &lock_class_mtx_sleep }, 344 { NULL, NULL }, 345 /* 346 * CDEV 347 */ 348 { "system map", &lock_class_mtx_sleep }, 349 { "vm page queue mutex", &lock_class_mtx_sleep }, 350 { "vnode interlock", &lock_class_mtx_sleep }, 351 { "cdev", &lock_class_mtx_sleep }, 352 { NULL, NULL }, 353 /* 354 * spin locks 355 */ 356#ifdef SMP 357 { "ap boot", &lock_class_mtx_spin }, 358#endif 359 { "sio", &lock_class_mtx_spin }, 360#ifdef __i386__ 361 { "cy", &lock_class_mtx_spin }, 362#endif 363 { "uart_hwmtx", &lock_class_mtx_spin }, 364 { "sabtty", &lock_class_mtx_spin }, 365 { "zstty", &lock_class_mtx_spin }, 366 { "ng_node", &lock_class_mtx_spin }, 367 { "ng_worklist", &lock_class_mtx_spin }, 368 { "taskqueue_fast", &lock_class_mtx_spin }, 369 { "intr table", &lock_class_mtx_spin }, 370 { "ithread table lock", &lock_class_mtx_spin }, 371 { "sleepq chain", &lock_class_mtx_spin }, 372 { "sched lock", &lock_class_mtx_spin }, 373 { "turnstile chain", &lock_class_mtx_spin }, 374 { "td_contested", &lock_class_mtx_spin }, 375 { "callout", &lock_class_mtx_spin }, 376 { "entropy harvest mutex", &lock_class_mtx_spin }, 377 /* 378 * leaf locks 379 */ 380 { "allpmaps", &lock_class_mtx_spin }, 381 { "vm page queue free mutex", &lock_class_mtx_spin }, 382 { "icu", &lock_class_mtx_spin }, 383#ifdef SMP 384 { "smp rendezvous", &lock_class_mtx_spin }, 385#if defined(__i386__) || defined(__amd64__) 386 { "tlb", &lock_class_mtx_spin }, 387#endif 388#ifdef __sparc64__ 389 { "ipi", &lock_class_mtx_spin }, 390 { "rtc_mtx", &lock_class_mtx_spin }, 391#endif 392#endif 393 { "clk", &lock_class_mtx_spin }, 394 { "mutex profiling lock", &lock_class_mtx_spin }, 395 { "kse zombie lock", &lock_class_mtx_spin }, 396 { "ALD Queue", &lock_class_mtx_spin }, 397#ifdef __ia64__ 398 { "MCA spin lock", &lock_class_mtx_spin }, 399#endif 400#if defined(__i386__) || defined(__amd64__) 401 { "pcicfg", &lock_class_mtx_spin }, 402 { "NDIS thread lock", &lock_class_mtx_spin }, 403#endif 404 { "tw_osl_io_lock", &lock_class_mtx_spin }, 405 { "tw_osl_q_lock", &lock_class_mtx_spin }, 406 { "tw_cl_io_lock", &lock_class_mtx_spin }, 407 { "tw_cl_intr_lock", &lock_class_mtx_spin }, 408 { "tw_cl_gen_lock", &lock_class_mtx_spin }, 409 { NULL, NULL }, 410 { NULL, NULL } 411}; 412 413#ifdef BLESSING 414/* 415 * Pairs of locks which have been blessed 
416 * Don't complain about order problems with blessed locks 417 */ 418static struct witness_blessed blessed_list[] = { 419}; 420static int blessed_count = 421 sizeof(blessed_list) / sizeof(struct witness_blessed); 422#endif 423 424/* 425 * List of all locks in the system. 426 */ 427TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks); 428 429static struct mtx all_mtx = { 430 { &lock_class_mtx_sleep, /* mtx_object.lo_class */ 431 "All locks list", /* mtx_object.lo_name */ 432 "All locks list", /* mtx_object.lo_type */ 433 LO_INITIALIZED, /* mtx_object.lo_flags */ 434 { NULL, NULL }, /* mtx_object.lo_list */ 435 NULL }, /* mtx_object.lo_witness */ 436 MTX_UNOWNED, 0 /* mtx_lock, mtx_recurse */ 437}; 438 439/* 440 * This global is set to 0 once it becomes safe to use the witness code. 441 */ 442static int witness_cold = 1; 443 444/* 445 * Global variables for book keeping. 446 */ 447static int lock_cur_cnt; 448static int lock_max_cnt; 449 450/* 451 * The WITNESS-enabled diagnostic code. 452 */ 453static void 454witness_initialize(void *dummy __unused) 455{ 456 struct lock_object *lock; 457 struct witness_order_list_entry *order; 458 struct witness *w, *w1; 459 int i; 460 461 /* 462 * We have to release Giant before initializing its witness 463 * structure so that WITNESS doesn't get confused. 464 */ 465 mtx_unlock(&Giant); 466 mtx_assert(&Giant, MA_NOTOWNED); 467 468 CTR1(KTR_WITNESS, "%s: initializing witness", __func__); 469 TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list); 470 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET | 471 MTX_NOWITNESS); 472 for (i = 0; i < WITNESS_COUNT; i++) 473 witness_free(&w_data[i]); 474 for (i = 0; i < WITNESS_CHILDCOUNT; i++) 475 witness_child_free(&w_childdata[i]); 476 for (i = 0; i < LOCK_CHILDCOUNT; i++) 477 witness_lock_list_free(&w_locklistdata[i]); 478 479 /* First add in all the specified order lists. */ 480 for (order = order_lists; order->w_name != NULL; order++) { 481 w = enroll(order->w_name, order->w_class); 482 if (w == NULL) 483 continue; 484 w->w_file = "order list"; 485 for (order++; order->w_name != NULL; order++) { 486 w1 = enroll(order->w_name, order->w_class); 487 if (w1 == NULL) 488 continue; 489 w1->w_file = "order list"; 490 if (!itismychild(w, w1)) 491 panic("Not enough memory for static orders!"); 492 w = w1; 493 } 494 } 495 496 /* Iterate through all locks and add them to witness. */ 497 mtx_lock(&all_mtx); 498 TAILQ_FOREACH(lock, &all_locks, lo_list) { 499 if (lock->lo_flags & LO_WITNESS) 500 lock->lo_witness = enroll(lock->lo_type, 501 lock->lo_class); 502 else 503 lock->lo_witness = NULL; 504 } 505 mtx_unlock(&all_mtx); 506 507 /* Mark the witness code as being ready for use. 
*/ 508 atomic_store_rel_int(&witness_cold, 0); 509 510 mtx_lock(&Giant); 511} 512SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL) 513 514static int 515sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS) 516{ 517 int error, value; 518 519 value = witness_watch; 520 error = sysctl_handle_int(oidp, &value, 0, req); 521 if (error != 0 || req->newptr == NULL) 522 return (error); 523 error = suser(req->td); 524 if (error != 0) 525 return (error); 526 if (value == witness_watch) 527 return (0); 528 if (value != 0) 529 return (EINVAL); 530 witness_watch = 0; 531 return (0); 532} 533 534void 535witness_init(struct lock_object *lock) 536{ 537 struct lock_class *class; 538 539 class = lock->lo_class; 540 if (lock->lo_flags & LO_INITIALIZED) 541 panic("%s: lock (%s) %s is already initialized", __func__, 542 class->lc_name, lock->lo_name); 543 if ((lock->lo_flags & LO_RECURSABLE) != 0 && 544 (class->lc_flags & LC_RECURSABLE) == 0) 545 panic("%s: lock (%s) %s can not be recursable", __func__, 546 class->lc_name, lock->lo_name); 547 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 548 (class->lc_flags & LC_SLEEPABLE) == 0) 549 panic("%s: lock (%s) %s can not be sleepable", __func__, 550 class->lc_name, lock->lo_name); 551 if ((lock->lo_flags & LO_UPGRADABLE) != 0 && 552 (class->lc_flags & LC_UPGRADABLE) == 0) 553 panic("%s: lock (%s) %s can not be upgradable", __func__, 554 class->lc_name, lock->lo_name); 555 556 mtx_lock(&all_mtx); 557 TAILQ_INSERT_TAIL(&all_locks, lock, lo_list); 558 lock->lo_flags |= LO_INITIALIZED; 559 lock_cur_cnt++; 560 if (lock_cur_cnt > lock_max_cnt) 561 lock_max_cnt = lock_cur_cnt; 562 mtx_unlock(&all_mtx); 563 if (!witness_cold && witness_watch != 0 && panicstr == NULL && 564 (lock->lo_flags & LO_WITNESS) != 0) 565 lock->lo_witness = enroll(lock->lo_type, class); 566 else 567 lock->lo_witness = NULL; 568} 569 570void 571witness_destroy(struct lock_object *lock) 572{ 573 struct witness *w; 574 575 if (witness_cold) 576 panic("lock (%s) %s destroyed while witness_cold", 577 lock->lo_class->lc_name, lock->lo_name); 578 if ((lock->lo_flags & LO_INITIALIZED) == 0) 579 panic("%s: lock (%s) %s is not initialized", __func__, 580 lock->lo_class->lc_name, lock->lo_name); 581 582 /* XXX: need to verify that no one holds the lock */ 583 w = lock->lo_witness; 584 if (w != NULL) { 585 mtx_lock_spin(&w_mtx); 586 MPASS(w->w_refcount > 0); 587 w->w_refcount--; 588 589 /* 590 * Lock is already released if we have an allocation failure 591 * and depart() fails. 592 */ 593 if (w->w_refcount != 0 || depart(w)) 594 mtx_unlock_spin(&w_mtx); 595 } 596 597 mtx_lock(&all_mtx); 598 lock_cur_cnt--; 599 TAILQ_REMOVE(&all_locks, lock, lo_list); 600 lock->lo_flags &= ~LO_INITIALIZED; 601 mtx_unlock(&all_mtx); 602} 603 604#ifdef DDB 605static void 606witness_display_list(void(*prnt)(const char *fmt, ...), 607 struct witness_list *list) 608{ 609 struct witness *w; 610 611 STAILQ_FOREACH(w, list, w_typelist) { 612 if (w->w_file == NULL || w->w_level > 0) 613 continue; 614 /* 615 * This lock has no anscestors, display its descendants. 616 */ 617 witness_displaydescendants(prnt, w, 0); 618 } 619} 620 621static void 622witness_display(void(*prnt)(const char *fmt, ...)) 623{ 624 struct witness *w; 625 626 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 627 witness_levelall(); 628 629 /* Clear all the displayed flags. */ 630 STAILQ_FOREACH(w, &w_all, w_list) { 631 w->w_displayed = 0; 632 } 633 634 /* 635 * First, handle sleep locks which have been acquired at least 636 * once. 
637 */ 638 prnt("Sleep locks:\n"); 639 witness_display_list(prnt, &w_sleep); 640 641 /* 642 * Now do spin locks which have been acquired at least once. 643 */ 644 prnt("\nSpin locks:\n"); 645 witness_display_list(prnt, &w_spin); 646 647 /* 648 * Finally, any locks which have not been acquired yet. 649 */ 650 prnt("\nLocks which were never acquired:\n"); 651 STAILQ_FOREACH(w, &w_all, w_list) { 652 if (w->w_file != NULL || w->w_refcount == 0) 653 continue; 654 prnt("%s\n", w->w_name); 655 } 656} 657#endif /* DDB */ 658 659/* Trim useless garbage from filenames. */ 660static const char * 661fixup_filename(const char *file) 662{ 663 664 if (file == NULL) 665 return (NULL); 666 while (strncmp(file, "../", 3) == 0) 667 file += 3; 668 return (file); 669} 670 671int 672witness_defineorder(struct lock_object *lock1, struct lock_object *lock2) 673{ 674 675 if (witness_watch == 0 || panicstr != NULL) 676 return (0); 677 678 /* Require locks that witness knows about. */ 679 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL || 680 lock2->lo_witness == NULL) 681 return (EINVAL); 682 683 MPASS(!mtx_owned(&w_mtx)); 684 mtx_lock_spin(&w_mtx); 685 686 /* 687 * If we already have either an explicit or implied lock order that 688 * is the other way around, then return an error. 689 */ 690 if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) { 691 mtx_unlock_spin(&w_mtx); 692 return (EDOOFUS); 693 } 694 695 /* Try to add the new order. */ 696 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 697 lock2->lo_type, lock1->lo_type); 698 if (!itismychild(lock1->lo_witness, lock2->lo_witness)) 699 return (ENOMEM); 700 mtx_unlock_spin(&w_mtx); 701 return (0); 702} 703 704void 705witness_checkorder(struct lock_object *lock, int flags, const char *file, 706 int line) 707{ 708 struct lock_list_entry **lock_list, *lle; 709 struct lock_instance *lock1, *lock2; 710 struct lock_class *class; 711 struct witness *w, *w1; 712 struct thread *td; 713 int i, j; 714 715 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 716 panicstr != NULL) 717 return; 718 719 /* 720 * Try locks do not block if they fail to acquire the lock, thus 721 * there is no danger of deadlocks or of switching while holding a 722 * spin lock if we acquire a lock via a try operation. This 723 * function shouldn't even be called for try locks, so panic if 724 * that happens. 725 */ 726 if (flags & LOP_TRYLOCK) 727 panic("%s should not be called for try lock operations", 728 __func__); 729 730 w = lock->lo_witness; 731 class = lock->lo_class; 732 td = curthread; 733 file = fixup_filename(file); 734 735 if (class->lc_flags & LC_SLEEPLOCK) { 736 /* 737 * Since spin locks include a critical section, this check 738 * implicitly enforces a lock order of all sleep locks before 739 * all spin locks. 740 */ 741 if (td->td_critnest != 0 && !kdb_active) 742 panic("blockable sleep lock (%s) %s @ %s:%d", 743 class->lc_name, lock->lo_name, file, line); 744 745 /* 746 * If this is the first lock acquired then just return as 747 * no order checking is needed. 748 */ 749 if (td->td_sleeplocks == NULL) 750 return; 751 lock_list = &td->td_sleeplocks; 752 } else { 753 /* 754 * If this is the first lock, just return as no order 755 * checking is needed. We check this in both if clauses 756 * here as unifying the check would require us to use a 757 * critical section to ensure we don't migrate while doing 758 * the check. 
Note that if this is not the first lock, we 759 * are already in a critical section and are safe for the 760 * rest of the check. 761 */ 762 if (PCPU_GET(spinlocks) == NULL) 763 return; 764 lock_list = PCPU_PTR(spinlocks); 765 } 766 767 /* 768 * Check to see if we are recursing on a lock we already own. If 769 * so, make sure that we don't mismatch exclusive and shared lock 770 * acquires. 771 */ 772 lock1 = find_instance(*lock_list, lock); 773 if (lock1 != NULL) { 774 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 && 775 (flags & LOP_EXCLUSIVE) == 0) { 776 printf("shared lock of (%s) %s @ %s:%d\n", 777 class->lc_name, lock->lo_name, file, line); 778 printf("while exclusively locked from %s:%d\n", 779 lock1->li_file, lock1->li_line); 780 panic("share->excl"); 781 } 782 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 && 783 (flags & LOP_EXCLUSIVE) != 0) { 784 printf("exclusive lock of (%s) %s @ %s:%d\n", 785 class->lc_name, lock->lo_name, file, line); 786 printf("while share locked from %s:%d\n", 787 lock1->li_file, lock1->li_line); 788 panic("excl->share"); 789 } 790 return; 791 } 792 793 /* 794 * Try locks do not block if they fail to acquire the lock, thus 795 * there is no danger of deadlocks or of switching while holding a 796 * spin lock if we acquire a lock via a try operation. 797 */ 798 if (flags & LOP_TRYLOCK) 799 return; 800 801 /* 802 * Check for duplicate locks of the same type. Note that we only 803 * have to check for this on the last lock we just acquired. Any 804 * other cases will be caught as lock order violations. 805 */ 806 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1]; 807 w1 = lock1->li_lock->lo_witness; 808 if (w1 == w) { 809 if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) || 810 (flags & LOP_DUPOK)) 811 return; 812 w->w_same_squawked = 1; 813 printf("acquiring duplicate lock of same type: \"%s\"\n", 814 lock->lo_type); 815 printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name, 816 lock1->li_file, lock1->li_line); 817 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line); 818#ifdef KDB 819 goto debugger; 820#else 821 return; 822#endif 823 } 824 MPASS(!mtx_owned(&w_mtx)); 825 mtx_lock_spin(&w_mtx); 826 /* 827 * If we know that the lock we are acquiring comes after 828 * the lock we most recently acquired in the lock order tree, 829 * then there is no need for any further checks. 830 */ 831 if (isitmychild(w1, w)) { 832 mtx_unlock_spin(&w_mtx); 833 return; 834 } 835 for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) { 836 for (i = lle->ll_count - 1; i >= 0; i--, j++) { 837 838 MPASS(j < WITNESS_COUNT); 839 lock1 = &lle->ll_children[i]; 840 w1 = lock1->li_lock->lo_witness; 841 842 /* 843 * If this lock doesn't undergo witness checking, 844 * then skip it. 845 */ 846 if (w1 == NULL) { 847 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0, 848 ("lock missing witness structure")); 849 continue; 850 } 851 /* 852 * If we are locking Giant and this is a sleepable 853 * lock, then skip it. 854 */ 855 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 && 856 lock == &Giant.mtx_object) 857 continue; 858 /* 859 * If we are locking a sleepable lock and this lock 860 * is Giant, then skip it. 861 */ 862 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 863 lock1->li_lock == &Giant.mtx_object) 864 continue; 865 /* 866 * If we are locking a sleepable lock and this lock 867 * isn't sleepable, we want to treat it as a lock 868 * order violation to enforce a general lock order of 869 * sleepable locks before non-sleepable locks.
870 */ 871 if (!((lock->lo_flags & LO_SLEEPABLE) != 0 && 872 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0)) 873 /* 874 * Check the lock order hierarchy for a reveresal. 875 */ 876 if (!isitmydescendant(w, w1)) 877 continue; 878 /* 879 * We have a lock order violation, check to see if it 880 * is allowed or has already been yelled about. 881 */ 882 mtx_unlock_spin(&w_mtx); 883#ifdef BLESSING 884 /* 885 * If the lock order is blessed, just bail. We don't 886 * look for other lock order violations though, which 887 * may be a bug. 888 */ 889 if (blessed(w, w1)) 890 return; 891#endif 892 if (lock1->li_lock == &Giant.mtx_object) { 893 if (w1->w_Giant_squawked) 894 return; 895 else 896 w1->w_Giant_squawked = 1; 897 } else { 898 if (w1->w_other_squawked) 899 return; 900 else 901 w1->w_other_squawked = 1; 902 } 903 /* 904 * Ok, yell about it. 905 */ 906 printf("lock order reversal\n"); 907 /* 908 * Try to locate an earlier lock with 909 * witness w in our list. 910 */ 911 do { 912 lock2 = &lle->ll_children[i]; 913 MPASS(lock2->li_lock != NULL); 914 if (lock2->li_lock->lo_witness == w) 915 break; 916 if (i == 0 && lle->ll_next != NULL) { 917 lle = lle->ll_next; 918 i = lle->ll_count - 1; 919 MPASS(i >= 0 && i < LOCK_NCHILDREN); 920 } else 921 i--; 922 } while (i >= 0); 923 if (i < 0) { 924 printf(" 1st %p %s (%s) @ %s:%d\n", 925 lock1->li_lock, lock1->li_lock->lo_name, 926 lock1->li_lock->lo_type, lock1->li_file, 927 lock1->li_line); 928 printf(" 2nd %p %s (%s) @ %s:%d\n", lock, 929 lock->lo_name, lock->lo_type, file, line); 930 } else { 931 printf(" 1st %p %s (%s) @ %s:%d\n", 932 lock2->li_lock, lock2->li_lock->lo_name, 933 lock2->li_lock->lo_type, lock2->li_file, 934 lock2->li_line); 935 printf(" 2nd %p %s (%s) @ %s:%d\n", 936 lock1->li_lock, lock1->li_lock->lo_name, 937 lock1->li_lock->lo_type, lock1->li_file, 938 lock1->li_line); 939 printf(" 3rd %p %s (%s) @ %s:%d\n", lock, 940 lock->lo_name, lock->lo_type, file, line); 941 } 942#ifdef KDB 943 goto debugger; 944#else 945 return; 946#endif 947 } 948 } 949 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1]; 950 /* 951 * If requested, build a new lock order. However, don't build a new 952 * relationship between a sleepable lock and Giant if it is in the 953 * wrong direction. The correct lock order is that sleepable locks 954 * always come before Giant. 955 */ 956 if (flags & LOP_NEWORDER && 957 !(lock1->li_lock == &Giant.mtx_object && 958 (lock->lo_flags & LO_SLEEPABLE) != 0)) { 959 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 960 lock->lo_type, lock1->li_lock->lo_type); 961 if (!itismychild(lock1->li_lock->lo_witness, w)) 962 /* Witness is dead. */ 963 return; 964 } 965 mtx_unlock_spin(&w_mtx); 966 return; 967 968#ifdef KDB 969debugger: 970 if (witness_trace) 971 kdb_backtrace(); 972 if (witness_kdb) 973 kdb_enter(__func__); 974#endif 975} 976 977void 978witness_lock(struct lock_object *lock, int flags, const char *file, int line) 979{ 980 struct lock_list_entry **lock_list, *lle; 981 struct lock_instance *instance; 982 struct witness *w; 983 struct thread *td; 984 985 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 986 panicstr != NULL) 987 return; 988 w = lock->lo_witness; 989 td = curthread; 990 file = fixup_filename(file); 991 992 /* Determine lock list for this lock. */ 993 if (lock->lo_class->lc_flags & LC_SLEEPLOCK) 994 lock_list = &td->td_sleeplocks; 995 else 996 lock_list = PCPU_PTR(spinlocks); 997 998 /* Check to see if we are recursing on a lock we already own. 
*/ 999 instance = find_instance(*lock_list, lock); 1000 if (instance != NULL) { 1001 instance->li_flags++; 1002 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__, 1003 td->td_proc->p_pid, lock->lo_name, 1004 instance->li_flags & LI_RECURSEMASK); 1005 instance->li_file = file; 1006 instance->li_line = line; 1007 return; 1008 } 1009 1010 /* Update per-witness last file and line acquire. */ 1011 w->w_file = file; 1012 w->w_line = line; 1013 1014 /* Find the next open lock instance in the list and fill it. */ 1015 lle = *lock_list; 1016 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) { 1017 lle = witness_lock_list_get(); 1018 if (lle == NULL) 1019 return; 1020 lle->ll_next = *lock_list; 1021 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__, 1022 td->td_proc->p_pid, lle); 1023 *lock_list = lle; 1024 } 1025 instance = &lle->ll_children[lle->ll_count++]; 1026 instance->li_lock = lock; 1027 instance->li_line = line; 1028 instance->li_file = file; 1029 if ((flags & LOP_EXCLUSIVE) != 0) 1030 instance->li_flags = LI_EXCLUSIVE; 1031 else 1032 instance->li_flags = 0; 1033 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__, 1034 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1); 1035} 1036 1037void 1038witness_upgrade(struct lock_object *lock, int flags, const char *file, int line) 1039{ 1040 struct lock_instance *instance; 1041 struct lock_class *class; 1042 1043 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1044 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1045 return; 1046 class = lock->lo_class; 1047 file = fixup_filename(file); 1048 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1049 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d", 1050 class->lc_name, lock->lo_name, file, line); 1051 if ((flags & LOP_TRYLOCK) == 0) 1052 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name, 1053 lock->lo_name, file, line); 1054 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1055 panic("upgrade of non-sleep lock (%s) %s @ %s:%d", 1056 class->lc_name, lock->lo_name, file, line); 1057 instance = find_instance(curthread->td_sleeplocks, lock); 1058 if (instance == NULL) 1059 panic("upgrade of unlocked lock (%s) %s @ %s:%d", 1060 class->lc_name, lock->lo_name, file, line); 1061 if ((instance->li_flags & LI_EXCLUSIVE) != 0) 1062 panic("upgrade of exclusive lock (%s) %s @ %s:%d", 1063 class->lc_name, lock->lo_name, file, line); 1064 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1065 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d", 1066 class->lc_name, lock->lo_name, 1067 instance->li_flags & LI_RECURSEMASK, file, line); 1068 instance->li_flags |= LI_EXCLUSIVE; 1069} 1070 1071void 1072witness_downgrade(struct lock_object *lock, int flags, const char *file, 1073 int line) 1074{ 1075 struct lock_instance *instance; 1076 struct lock_class *class; 1077 1078 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1079 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1080 return; 1081 class = lock->lo_class; 1082 file = fixup_filename(file); 1083 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1084 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d", 1085 class->lc_name, lock->lo_name, file, line); 1086 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1087 panic("downgrade of non-sleep lock (%s) %s @ %s:%d", 1088 class->lc_name, lock->lo_name, file, line); 1089 instance = find_instance(curthread->td_sleeplocks, lock); 1090 if (instance == NULL) 1091 panic("downgrade of unlocked lock (%s) %s @ %s:%d", 1092 
class->lc_name, lock->lo_name, file, line); 1093 if ((instance->li_flags & LI_EXCLUSIVE) == 0) 1094 panic("downgrade of shared lock (%s) %s @ %s:%d", 1095 class->lc_name, lock->lo_name, file, line); 1096 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1097 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d", 1098 class->lc_name, lock->lo_name, 1099 instance->li_flags & LI_RECURSEMASK, file, line); 1100 instance->li_flags &= ~LI_EXCLUSIVE; 1101} 1102 1103void 1104witness_unlock(struct lock_object *lock, int flags, const char *file, int line) 1105{ 1106 struct lock_list_entry **lock_list, *lle; 1107 struct lock_instance *instance; 1108 struct lock_class *class; 1109 struct thread *td; 1110 register_t s; 1111 int i, j; 1112 1113 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 1114 panicstr != NULL) 1115 return; 1116 td = curthread; 1117 class = lock->lo_class; 1118 file = fixup_filename(file); 1119 1120 /* Find lock instance associated with this lock. */ 1121 if (class->lc_flags & LC_SLEEPLOCK) 1122 lock_list = &td->td_sleeplocks; 1123 else 1124 lock_list = PCPU_PTR(spinlocks); 1125 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next) 1126 for (i = 0; i < (*lock_list)->ll_count; i++) { 1127 instance = &(*lock_list)->ll_children[i]; 1128 if (instance->li_lock == lock) 1129 goto found; 1130 } 1131 panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name, 1132 file, line); 1133found: 1134 1135 /* First, check for shared/exclusive mismatches. */ 1136 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && 1137 (flags & LOP_EXCLUSIVE) == 0) { 1138 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name, 1139 lock->lo_name, file, line); 1140 printf("while exclusively locked from %s:%d\n", 1141 instance->li_file, instance->li_line); 1142 panic("excl->ushare"); 1143 } 1144 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && 1145 (flags & LOP_EXCLUSIVE) != 0) { 1146 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name, 1147 lock->lo_name, file, line); 1148 printf("while share locked from %s:%d\n", instance->li_file, 1149 instance->li_line); 1150 panic("share->uexcl"); 1151 } 1152 1153 /* If we are recursed, unrecurse. */ 1154 if ((instance->li_flags & LI_RECURSEMASK) > 0) { 1155 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__, 1156 td->td_proc->p_pid, instance->li_lock->lo_name, 1157 instance->li_flags); 1158 instance->li_flags--; 1159 return; 1160 } 1161 1162 /* Otherwise, remove this item from the list. */ 1163 s = intr_disable(); 1164 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__, 1165 td->td_proc->p_pid, instance->li_lock->lo_name, 1166 (*lock_list)->ll_count - 1); 1167 for (j = i; j < (*lock_list)->ll_count - 1; j++) 1168 (*lock_list)->ll_children[j] = 1169 (*lock_list)->ll_children[j + 1]; 1170 (*lock_list)->ll_count--; 1171 intr_restore(s); 1172 1173 /* If this lock list entry is now empty, free it. */ 1174 if ((*lock_list)->ll_count == 0) { 1175 lle = *lock_list; 1176 *lock_list = lle->ll_next; 1177 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__, 1178 td->td_proc->p_pid, lle); 1179 witness_lock_list_free(lle); 1180 } 1181} 1182 1183/* 1184 * Warn if any locks other than 'lock' are held. Flags can be passed in to 1185 * exempt Giant and sleepable locks from the checks as well. If any 1186 * non-exempt locks are held, then a supplied message is printed to the 1187 * console along with a list of the offending locks. If indicated in the 1188 * flags then a failure results in a panic as well. 
1189 */ 1190int 1191witness_warn(int flags, struct lock_object *lock, const char *fmt, ...) 1192{ 1193 struct lock_list_entry *lle; 1194 struct lock_instance *lock1; 1195 struct thread *td; 1196 va_list ap; 1197 int i, n; 1198 1199 if (witness_cold || witness_watch == 0 || panicstr != NULL) 1200 return (0); 1201 n = 0; 1202 td = curthread; 1203 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next) 1204 for (i = lle->ll_count - 1; i >= 0; i--) { 1205 lock1 = &lle->ll_children[i]; 1206 if (lock1->li_lock == lock) 1207 continue; 1208 if (flags & WARN_GIANTOK && 1209 lock1->li_lock == &Giant.mtx_object) 1210 continue; 1211 if (flags & WARN_SLEEPOK && 1212 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) 1213 continue; 1214 if (n == 0) { 1215 va_start(ap, fmt); 1216 vprintf(fmt, ap); 1217 va_end(ap); 1218 printf(" with the following"); 1219 if (flags & WARN_SLEEPOK) 1220 printf(" non-sleepable"); 1221 printf(" locks held:\n"); 1222 } 1223 n++; 1224 witness_list_lock(lock1); 1225 } 1226 if (PCPU_GET(spinlocks) != NULL) { 1227 /* 1228 * Since we already hold a spinlock preemption is 1229 * already blocked. 1230 */ 1231 if (n == 0) { 1232 va_start(ap, fmt); 1233 vprintf(fmt, ap); 1234 va_end(ap); 1235 printf(" with the following"); 1236 if (flags & WARN_SLEEPOK) 1237 printf(" non-sleepable"); 1238 printf(" locks held:\n"); 1239 } 1240 n += witness_list_locks(PCPU_PTR(spinlocks)); 1241 } 1242 if (flags & WARN_PANIC && n) 1243 panic("witness_warn"); 1244#ifdef KDB 1245 else if (witness_kdb && n) 1246 kdb_enter(__func__); 1247 else if (witness_trace && n) 1248 kdb_backtrace(); 1249#endif 1250 return (n); 1251} 1252 1253const char * 1254witness_file(struct lock_object *lock) 1255{ 1256 struct witness *w; 1257 1258 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL) 1259 return ("?"); 1260 w = lock->lo_witness; 1261 return (w->w_file); 1262} 1263 1264int 1265witness_line(struct lock_object *lock) 1266{ 1267 struct witness *w; 1268 1269 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL) 1270 return (0); 1271 w = lock->lo_witness; 1272 return (w->w_line); 1273} 1274 1275static struct witness * 1276enroll(const char *description, struct lock_class *lock_class) 1277{ 1278 struct witness *w; 1279 1280 if (witness_watch == 0 || panicstr != NULL) 1281 return (NULL); 1282 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin) 1283 return (NULL); 1284 mtx_lock_spin(&w_mtx); 1285 STAILQ_FOREACH(w, &w_all, w_list) { 1286 if (w->w_name == description || (w->w_refcount > 0 && 1287 strcmp(description, w->w_name) == 0)) { 1288 w->w_refcount++; 1289 mtx_unlock_spin(&w_mtx); 1290 if (lock_class != w->w_class) 1291 panic( 1292 "lock (%s) %s does not match earlier (%s) lock", 1293 description, lock_class->lc_name, 1294 w->w_class->lc_name); 1295 return (w); 1296 } 1297 } 1298 /* 1299 * This isn't quite right, as witness_cold is still 0 while we 1300 * enroll all the locks initialized before witness_initialize(). 
1301 */ 1302 if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) { 1303 mtx_unlock_spin(&w_mtx); 1304 panic("spin lock %s not in order list", description); 1305 } 1306 if ((w = witness_get()) == NULL) 1307 return (NULL); 1308 w->w_name = description; 1309 w->w_class = lock_class; 1310 w->w_refcount = 1; 1311 STAILQ_INSERT_HEAD(&w_all, w, w_list); 1312 if (lock_class->lc_flags & LC_SPINLOCK) { 1313 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist); 1314 w_spin_cnt++; 1315 } else if (lock_class->lc_flags & LC_SLEEPLOCK) { 1316 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist); 1317 w_sleep_cnt++; 1318 } else { 1319 mtx_unlock_spin(&w_mtx); 1320 panic("lock class %s is not sleep or spin", 1321 lock_class->lc_name); 1322 } 1323 mtx_unlock_spin(&w_mtx); 1324 return (w); 1325} 1326 1327/* Don't let the door bang you on the way out... */ 1328static int 1329depart(struct witness *w) 1330{ 1331 struct witness_child_list_entry *wcl, *nwcl; 1332 struct witness_list *list; 1333 struct witness *parent; 1334 1335 MPASS(w->w_refcount == 0); 1336 if (w->w_class->lc_flags & LC_SLEEPLOCK) { 1337 list = &w_sleep; 1338 w_sleep_cnt--; 1339 } else { 1340 list = &w_spin; 1341 w_spin_cnt--; 1342 } 1343 /* 1344 * First, we run through the entire tree looking for any 1345 * witnesses that the outgoing witness is a child of. For 1346 * each parent that we find, we reparent all the direct 1347 * children of the outgoing witness to its parent. 1348 */ 1349 STAILQ_FOREACH(parent, list, w_typelist) { 1350 if (!isitmychild(parent, w)) 1351 continue; 1352 removechild(parent, w); 1353 } 1354 1355 /* 1356 * Now we go through and free up the child list of the 1357 * outgoing witness. 1358 */ 1359 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) { 1360 nwcl = wcl->wcl_next; 1361 w_child_cnt--; 1362 witness_child_free(wcl); 1363 } 1364 1365 /* 1366 * Detach from various lists and free. 1367 */ 1368 STAILQ_REMOVE(list, w, witness, w_typelist); 1369 STAILQ_REMOVE(&w_all, w, witness, w_list); 1370 witness_free(w); 1371 1372 return (1); 1373} 1374 1375/* 1376 * Add "child" as a direct child of "parent". Returns false if 1377 * we fail due to out of memory. 
1378 */ 1379static int 1380insertchild(struct witness *parent, struct witness *child) 1381{ 1382 struct witness_child_list_entry **wcl; 1383 1384 MPASS(child != NULL && parent != NULL); 1385 1386 /* 1387 * Insert "child" after "parent" 1388 */ 1389 wcl = &parent->w_children; 1390 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN) 1391 wcl = &(*wcl)->wcl_next; 1392 if (*wcl == NULL) { 1393 *wcl = witness_child_get(); 1394 if (*wcl == NULL) 1395 return (0); 1396 w_child_cnt++; 1397 } 1398 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child; 1399 1400 return (1); 1401} 1402 1403 1404static int 1405itismychild(struct witness *parent, struct witness *child) 1406{ 1407 struct witness_list *list; 1408 1409 MPASS(child != NULL && parent != NULL); 1410 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) != 1411 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK))) 1412 panic( 1413 "%s: parent (%s) and child (%s) are not the same lock type", 1414 __func__, parent->w_class->lc_name, 1415 child->w_class->lc_name); 1416 1417 if (!insertchild(parent, child)) 1418 return (0); 1419 1420 if (parent->w_class->lc_flags & LC_SLEEPLOCK) 1421 list = &w_sleep; 1422 else 1423 list = &w_spin; 1424 return (1); 1425} 1426 1427static void 1428removechild(struct witness *parent, struct witness *child) 1429{ 1430 struct witness_child_list_entry **wcl, *wcl1; 1431 int i; 1432 1433 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next) 1434 for (i = 0; i < (*wcl)->wcl_count; i++) 1435 if ((*wcl)->wcl_children[i] == child) 1436 goto found; 1437 return; 1438found: 1439 (*wcl)->wcl_count--; 1440 if ((*wcl)->wcl_count > i) 1441 (*wcl)->wcl_children[i] = 1442 (*wcl)->wcl_children[(*wcl)->wcl_count]; 1443 MPASS((*wcl)->wcl_children[i] != NULL); 1444 if ((*wcl)->wcl_count != 0) 1445 return; 1446 wcl1 = *wcl; 1447 *wcl = wcl1->wcl_next; 1448 w_child_cnt--; 1449 witness_child_free(wcl1); 1450} 1451 1452static int 1453isitmychild(struct witness *parent, struct witness *child) 1454{ 1455 struct witness_child_list_entry *wcl; 1456 int i; 1457 1458 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1459 for (i = 0; i < wcl->wcl_count; i++) { 1460 if (wcl->wcl_children[i] == child) 1461 return (1); 1462 } 1463 } 1464 return (0); 1465} 1466 1467static int 1468isitmydescendant(struct witness *parent, struct witness *child) 1469{ 1470 struct witness_child_list_entry *wcl; 1471 int i, j; 1472 1473 if (isitmychild(parent, child)) 1474 return (1); 1475 j = 0; 1476 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1477 MPASS(j < 1000); 1478 for (i = 0; i < wcl->wcl_count; i++) { 1479 if (isitmydescendant(wcl->wcl_children[i], child)) 1480 return (1); 1481 } 1482 j++; 1483 } 1484 return (0); 1485} 1486 1487static void 1488witness_levelall (void) 1489{ 1490 struct witness_list *list; 1491 struct witness *w, *w1; 1492 1493 /* 1494 * First clear all levels. 1495 */ 1496 STAILQ_FOREACH(w, &w_all, w_list) { 1497 w->w_level = 0; 1498 } 1499 1500 /* 1501 * Look for locks with no parent and level all their descendants. 1502 */ 1503 STAILQ_FOREACH(w, &w_all, w_list) { 1504 /* 1505 * This is just an optimization, technically we could get 1506 * away just walking the all list each time. 
1507 */ 1508 if (w->w_class->lc_flags & LC_SLEEPLOCK) 1509 list = &w_sleep; 1510 else 1511 list = &w_spin; 1512 STAILQ_FOREACH(w1, list, w_typelist) { 1513 if (isitmychild(w1, w)) 1514 goto skip; 1515 } 1516 witness_leveldescendents(w, 0); 1517 skip: 1518 ; /* silence GCC 3.x */ 1519 } 1520} 1521 1522static void 1523witness_leveldescendents(struct witness *parent, int level) 1524{ 1525 struct witness_child_list_entry *wcl; 1526 int i; 1527 1528 if (parent->w_level < level) 1529 parent->w_level = level; 1530 level++; 1531 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1532 for (i = 0; i < wcl->wcl_count; i++) 1533 witness_leveldescendents(wcl->wcl_children[i], level); 1534} 1535 1536static void 1537witness_displaydescendants(void(*prnt)(const char *fmt, ...), 1538 struct witness *parent, int indent) 1539{ 1540 struct witness_child_list_entry *wcl; 1541 int i, level; 1542 1543 level = parent->w_level; 1544 prnt("%-2d", level); 1545 for (i = 0; i < indent; i++) 1546 prnt(" "); 1547 if (parent->w_refcount > 0) 1548 prnt("%s", parent->w_name); 1549 else 1550 prnt("(dead)"); 1551 if (parent->w_displayed) { 1552 prnt(" -- (already displayed)\n"); 1553 return; 1554 } 1555 parent->w_displayed = 1; 1556 if (parent->w_refcount > 0) { 1557 if (parent->w_file != NULL) 1558 prnt(" -- last acquired @ %s:%d", parent->w_file, 1559 parent->w_line); 1560 } 1561 prnt("\n"); 1562 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1563 for (i = 0; i < wcl->wcl_count; i++) 1564 witness_displaydescendants(prnt, 1565 wcl->wcl_children[i], indent + 1); 1566} 1567 1568#ifdef BLESSING 1569static int 1570blessed(struct witness *w1, struct witness *w2) 1571{ 1572 int i; 1573 struct witness_blessed *b; 1574 1575 for (i = 0; i < blessed_count; i++) { 1576 b = &blessed_list[i]; 1577 if (strcmp(w1->w_name, b->b_lock1) == 0) { 1578 if (strcmp(w2->w_name, b->b_lock2) == 0) 1579 return (1); 1580 continue; 1581 } 1582 if (strcmp(w1->w_name, b->b_lock2) == 0) 1583 if (strcmp(w2->w_name, b->b_lock1) == 0) 1584 return (1); 1585 } 1586 return (0); 1587} 1588#endif 1589 1590static struct witness * 1591witness_get(void) 1592{ 1593 struct witness *w; 1594 1595 if (witness_watch == 0) { 1596 mtx_unlock_spin(&w_mtx); 1597 return (NULL); 1598 } 1599 if (STAILQ_EMPTY(&w_free)) { 1600 witness_watch = 0; 1601 mtx_unlock_spin(&w_mtx); 1602 printf("%s: witness exhausted\n", __func__); 1603 return (NULL); 1604 } 1605 w = STAILQ_FIRST(&w_free); 1606 STAILQ_REMOVE_HEAD(&w_free, w_list); 1607 w_free_cnt--; 1608 bzero(w, sizeof(*w)); 1609 return (w); 1610} 1611 1612static void 1613witness_free(struct witness *w) 1614{ 1615 1616 STAILQ_INSERT_HEAD(&w_free, w, w_list); 1617 w_free_cnt++; 1618} 1619 1620static struct witness_child_list_entry * 1621witness_child_get(void) 1622{ 1623 struct witness_child_list_entry *wcl; 1624 1625 if (witness_watch == 0) { 1626 mtx_unlock_spin(&w_mtx); 1627 return (NULL); 1628 } 1629 wcl = w_child_free; 1630 if (wcl == NULL) { 1631 witness_watch = 0; 1632 mtx_unlock_spin(&w_mtx); 1633 printf("%s: witness exhausted\n", __func__); 1634 return (NULL); 1635 } 1636 w_child_free = wcl->wcl_next; 1637 w_child_free_cnt--; 1638 bzero(wcl, sizeof(*wcl)); 1639 return (wcl); 1640} 1641 1642static void 1643witness_child_free(struct witness_child_list_entry *wcl) 1644{ 1645 1646 wcl->wcl_next = w_child_free; 1647 w_child_free = wcl; 1648 w_child_free_cnt++; 1649} 1650 1651static struct lock_list_entry * 1652witness_lock_list_get(void) 1653{ 1654 struct lock_list_entry *lle; 1655 1656 if 
(witness_watch == 0) 1657 return (NULL); 1658 mtx_lock_spin(&w_mtx); 1659 lle = w_lock_list_free; 1660 if (lle == NULL) { 1661 witness_watch = 0; 1662 mtx_unlock_spin(&w_mtx); 1663 printf("%s: witness exhausted\n", __func__); 1664 return (NULL); 1665 } 1666 w_lock_list_free = lle->ll_next; 1667 mtx_unlock_spin(&w_mtx); 1668 bzero(lle, sizeof(*lle)); 1669 return (lle); 1670} 1671 1672static void 1673witness_lock_list_free(struct lock_list_entry *lle) 1674{ 1675 1676 mtx_lock_spin(&w_mtx); 1677 lle->ll_next = w_lock_list_free; 1678 w_lock_list_free = lle; 1679 mtx_unlock_spin(&w_mtx); 1680} 1681 1682static struct lock_instance * 1683find_instance(struct lock_list_entry *lock_list, struct lock_object *lock) 1684{ 1685 struct lock_list_entry *lle; 1686 struct lock_instance *instance; 1687 int i; 1688 1689 for (lle = lock_list; lle != NULL; lle = lle->ll_next) 1690 for (i = lle->ll_count - 1; i >= 0; i--) { 1691 instance = &lle->ll_children[i]; 1692 if (instance->li_lock == lock) 1693 return (instance); 1694 } 1695 return (NULL); 1696} 1697 1698static void 1699witness_list_lock(struct lock_instance *instance) 1700{ 1701 struct lock_object *lock; 1702 1703 lock = instance->li_lock; 1704 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ? 1705 "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name); 1706 if (lock->lo_type != lock->lo_name) 1707 printf(" (%s)", lock->lo_type); 1708 printf(" r = %d (%p) locked @ %s:%d\n", 1709 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file, 1710 instance->li_line); 1711} 1712 1713#ifdef DDB 1714static int 1715witness_thread_has_locks(struct thread *td) 1716{ 1717 1718 return (td->td_sleeplocks != NULL); 1719} 1720 1721static int 1722witness_proc_has_locks(struct proc *p) 1723{ 1724 struct thread *td; 1725 1726 FOREACH_THREAD_IN_PROC(p, td) { 1727 if (witness_thread_has_locks(td)) 1728 return (1); 1729 } 1730 return (0); 1731} 1732#endif 1733 1734int 1735witness_list_locks(struct lock_list_entry **lock_list) 1736{ 1737 struct lock_list_entry *lle; 1738 int i, nheld; 1739 1740 nheld = 0; 1741 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 1742 for (i = lle->ll_count - 1; i >= 0; i--) { 1743 witness_list_lock(&lle->ll_children[i]); 1744 nheld++; 1745 } 1746 return (nheld); 1747} 1748 1749/* 1750 * This is a bit risky at best. We call this function when we have timed 1751 * out acquiring a spin lock, and we assume that the other CPU is stuck 1752 * with this lock held. So, we go groveling around in the other CPU's 1753 * per-cpu data to try to find the lock instance for this spin lock to 1754 * see when it was last acquired. 
1755 */ 1756void 1757witness_display_spinlock(struct lock_object *lock, struct thread *owner) 1758{ 1759 struct lock_instance *instance; 1760 struct pcpu *pc; 1761 1762 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 1763 return; 1764 pc = pcpu_find(owner->td_oncpu); 1765 instance = find_instance(pc->pc_spinlocks, lock); 1766 if (instance != NULL) 1767 witness_list_lock(instance); 1768} 1769 1770void 1771witness_save(struct lock_object *lock, const char **filep, int *linep) 1772{ 1773 struct lock_instance *instance; 1774 1775 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1776 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1777 return; 1778 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1779 panic("%s: lock (%s) %s is not a sleep lock", __func__, 1780 lock->lo_class->lc_name, lock->lo_name); 1781 instance = find_instance(curthread->td_sleeplocks, lock); 1782 if (instance == NULL) 1783 panic("%s: lock (%s) %s not locked", __func__, 1784 lock->lo_class->lc_name, lock->lo_name); 1785 *filep = instance->li_file; 1786 *linep = instance->li_line; 1787} 1788 1789void 1790witness_restore(struct lock_object *lock, const char *file, int line) 1791{ 1792 struct lock_instance *instance; 1793 1794 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1795 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1796 return; 1797 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0) 1798 panic("%s: lock (%s) %s is not a sleep lock", __func__, 1799 lock->lo_class->lc_name, lock->lo_name); 1800 instance = find_instance(curthread->td_sleeplocks, lock); 1801 if (instance == NULL) 1802 panic("%s: lock (%s) %s not locked", __func__, 1803 lock->lo_class->lc_name, lock->lo_name); 1804 lock->lo_witness->w_file = file; 1805 lock->lo_witness->w_line = line; 1806 instance->li_file = file; 1807 instance->li_line = line; 1808} 1809 1810void 1811witness_assert(struct lock_object *lock, int flags, const char *file, int line) 1812{ 1813#ifdef INVARIANT_SUPPORT 1814 struct lock_instance *instance; 1815 1816 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1817 return; 1818 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0) 1819 instance = find_instance(curthread->td_sleeplocks, lock); 1820 else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0) 1821 instance = find_instance(PCPU_GET(spinlocks), lock); 1822 else { 1823 panic("Lock (%s) %s is not sleep or spin!", 1824 lock->lo_class->lc_name, lock->lo_name); 1825 } 1826 file = fixup_filename(file); 1827 switch (flags) { 1828 case LA_UNLOCKED: 1829 if (instance != NULL) 1830 panic("Lock (%s) %s locked @ %s:%d.", 1831 lock->lo_class->lc_name, lock->lo_name, file, line); 1832 break; 1833 case LA_LOCKED: 1834 case LA_LOCKED | LA_RECURSED: 1835 case LA_LOCKED | LA_NOTRECURSED: 1836 case LA_SLOCKED: 1837 case LA_SLOCKED | LA_RECURSED: 1838 case LA_SLOCKED | LA_NOTRECURSED: 1839 case LA_XLOCKED: 1840 case LA_XLOCKED | LA_RECURSED: 1841 case LA_XLOCKED | LA_NOTRECURSED: 1842 if (instance == NULL) { 1843 panic("Lock (%s) %s not locked @ %s:%d.", 1844 lock->lo_class->lc_name, lock->lo_name, file, line); 1845 break; 1846 } 1847 if ((flags & LA_XLOCKED) != 0 && 1848 (instance->li_flags & LI_EXCLUSIVE) == 0) 1849 panic("Lock (%s) %s not exclusively locked @ %s:%d.", 1850 lock->lo_class->lc_name, lock->lo_name, file, line); 1851 if ((flags & LA_SLOCKED) != 0 && 1852 (instance->li_flags & LI_EXCLUSIVE) != 0) 1853 panic("Lock (%s) %s exclusively locked @ %s:%d.", 1854 lock->lo_class->lc_name, 
lock->lo_name, file, line); 1855 if ((flags & LA_RECURSED) != 0 && 1856 (instance->li_flags & LI_RECURSEMASK) == 0) 1857 panic("Lock (%s) %s not recursed @ %s:%d.", 1858 lock->lo_class->lc_name, lock->lo_name, file, line); 1859 if ((flags & LA_NOTRECURSED) != 0 && 1860 (instance->li_flags & LI_RECURSEMASK) != 0) 1861 panic("Lock (%s) %s recursed @ %s:%d.", 1862 lock->lo_class->lc_name, lock->lo_name, file, line); 1863 break; 1864 default: 1865 panic("Invalid lock assertion at %s:%d.", file, line); 1866 1867 } 1868#endif /* INVARIANT_SUPPORT */ 1869} 1870 1871#ifdef DDB 1872static void 1873witness_list(struct thread *td) 1874{ 1875 1876 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1877 KASSERT(kdb_active, ("%s: not in the debugger", __func__)); 1878 1879 if (witness_watch == 0) 1880 return; 1881 1882 witness_list_locks(&td->td_sleeplocks); 1883 1884 /* 1885 * We only handle spinlocks if td == curthread. This is somewhat broken 1886 * if td is currently executing on some other CPU and holds spin locks 1887 * as we won't display those locks. If we had a MI way of getting 1888 * the per-cpu data for a given cpu then we could use 1889 * td->td_oncpu to get the list of spinlocks for this thread 1890 * and "fix" this. 1891 * 1892 * That still wouldn't really fix this unless we locked sched_lock 1893 * or stopped the other CPU to make sure it wasn't changing the list 1894 * out from under us. It is probably best to just not try to handle 1895 * threads on other CPU's for now. 1896 */ 1897 if (td == curthread && PCPU_GET(spinlocks) != NULL) 1898 witness_list_locks(PCPU_PTR(spinlocks)); 1899} 1900 1901DB_SHOW_COMMAND(locks, db_witness_list) 1902{ 1903 struct thread *td; 1904 pid_t pid; 1905 struct proc *p; 1906 1907 if (have_addr) { 1908 pid = (addr % 16) + ((addr >> 4) % 16) * 10 + 1909 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 + 1910 ((addr >> 16) % 16) * 10000; 1911 /* sx_slock(&allproc_lock); */ 1912 FOREACH_PROC_IN_SYSTEM(p) { 1913 if (p->p_pid == pid) 1914 break; 1915 } 1916 /* sx_sunlock(&allproc_lock); */ 1917 if (p == NULL) { 1918 db_printf("pid %d not found\n", pid); 1919 return; 1920 } 1921 FOREACH_THREAD_IN_PROC(p, td) { 1922 witness_list(td); 1923 } 1924 } else { 1925 td = curthread; 1926 witness_list(td); 1927 } 1928} 1929 1930DB_SHOW_COMMAND(alllocks, db_witness_list_all) 1931{ 1932 struct thread *td; 1933 struct proc *p; 1934 1935 /* 1936 * It would be nice to list only threads and processes that actually 1937 * held sleep locks, but that information is currently not exported 1938 * by WITNESS. 1939 */ 1940 FOREACH_PROC_IN_SYSTEM(p) { 1941 if (!witness_proc_has_locks(p)) 1942 continue; 1943 FOREACH_THREAD_IN_PROC(p, td) { 1944 if (!witness_thread_has_locks(td)) 1945 continue; 1946 printf("Process %d (%s) thread %p (%d)\n", p->p_pid, 1947 p->p_comm, td, td->td_tid); 1948 witness_list(td); 1949 } 1950 } 1951} 1952 1953DB_SHOW_COMMAND(witness, db_witness_display) 1954{ 1955 1956 witness_display(db_printf); 1957} 1958#endif 1959
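The ordering rules that witness_checkorder() enforces in this file (sleepable locks before non-sleepable locks, all sleep locks before spin locks, and pairwise orders learned on first acquisition via LOP_NEWORDER) are easiest to see from the consumer side. The fragment below is a minimal, hypothetical sketch of a kernel consumer that stays within those rules; the names foo_data_sx, foo_mtx and foo_intr_spin are invented for illustration and are not part of subr_witness.c.

/*
 * Illustrative sketch only: a consumer that always acquires its locks
 * in one order.  The first pass through foo_work() teaches witness the
 * sx -> sleep mutex -> spin mutex ordering; later acquisitions in the
 * same order pass witness_checkorder() quietly, while an acquisition in
 * the reverse order would be reported as a lock order reversal.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

static struct sx	foo_data_sx;	/* sleepable lock */
static struct mtx	foo_mtx;	/* sleep mutex */
static struct mtx	foo_intr_spin;	/* spin mutex */

static void
foo_init(void)
{

	sx_init(&foo_data_sx, "foo data");
	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
	mtx_init(&foo_intr_spin, "foo intr", NULL, MTX_SPIN);
}

static void
foo_work(void)
{

	/* Sleepable lock first, then the sleep mutex, then the spin mutex. */
	sx_xlock(&foo_data_sx);
	mtx_lock(&foo_mtx);
	mtx_lock_spin(&foo_intr_spin);

	/* ... work requiring all three locks ... */

	mtx_unlock_spin(&foo_intr_spin);
	mtx_unlock(&foo_mtx);
	sx_xunlock(&foo_data_sx);
}

This is a sketch under the assumption that nothing sleeps while foo_intr_spin is held and that foo_data_sx is never acquired after either mutex; witness only verifies the ordering, it does not make an incorrect ordering safe.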