subr_witness.c revision 144832
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
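/*
 * Illustrative sketch (not part of the original file): the orderings that
 * rules 1) and 3) above permit, written against the stock mutex(9) and
 * sx(9) KPIs.  The locks named here are hypothetical.
 */
#if 0
static struct sx example_sx;		/* hypothetical sleepable lock */
static struct mtx example_mtx;		/* hypothetical sleep mutex */

static void
example_giant_orders(void)
{

	/* Rule 3: Giant may follow a sleepable lock... */
	sx_xlock(&example_sx);
	mtx_lock(&Giant);
	mtx_unlock(&Giant);
	sx_xunlock(&example_sx);

	/* Rule 1: ...but any other mutex must come after Giant. */
	mtx_lock(&Giant);
	mtx_lock(&example_mtx);
	mtx_unlock(&example_mtx);
	mtx_unlock(&Giant);
}
#endif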
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 144832 2005-04-09 11:31:31Z pjd $");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_CHILDCOUNT		((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN	6

struct witness_child_list_entry;

struct witness {
	const	char *w_name;
	struct	lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct	witness_child_list_entry *w_children;	/* Great evilness... */
	const	char *w_file;
	int	w_line;
	u_int	w_level;
	u_int	w_refcount;
	u_char	w_Giant_squawked:1;
	u_char	w_other_squawked:1;
	u_char	w_same_squawked:1;
	u_char	w_displayed:1;
};

struct witness_child_list_entry {
	struct	witness_child_list_entry *wcl_next;
	struct	witness *wcl_children[WITNESS_NCHILDREN];
	u_int	wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const	char *b_lock1;
	const	char *b_lock2;
};
#endif

struct witness_order_list_entry {
	const	char *w_name;
	struct	lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
		    struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static int	rebalancetree(struct witness_list *list);
static void	removechild(struct witness *parent, struct witness *child);
static int	reparentchildren(struct witness *newparent,
		    struct witness *oldparent);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static const char *fixup_filename(const char *file);
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
		    struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_list(struct thread *td);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to 1, witness performs full lock
 * order checking for all locks.  If set to 2 or higher, then witness skips
 * the full lock order check if the lock being acquired is at a higher level
 * (i.e. farther down in the tree) than the current lock.  This last mode is
 * somewhat experimental and not considered fully safe.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to be
 * turned on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
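/*
 * Illustrative note (not part of the original file): the witness knobs in
 * this file are loader tunables and sysctls, so they can be set from
 * loader.conf or at runtime, e.g.:
 *
 *	debug.witness.watch=0		(loader.conf: boot with witness off)
 *	sysctl debug.witness.watch=0	(runtime: turn witness off for good)
 *	debug.witness.kdb=1		(loader.conf: enter the debugger on
 *					 a violation, with KDB compiled in)
 */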
#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

static struct witness_order_list_entry order_lists[] = {
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ "allprison", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "sabtty", &lock_class_mtx_spin },
{ "zstty", &lock_class_mtx_spin }, 346 { "ng_node", &lock_class_mtx_spin }, 347 { "ng_worklist", &lock_class_mtx_spin }, 348 { "taskqueue_fast", &lock_class_mtx_spin }, 349 { "intr table", &lock_class_mtx_spin }, 350 { "ithread table lock", &lock_class_mtx_spin }, 351 { "sleepq chain", &lock_class_mtx_spin }, 352 { "sched lock", &lock_class_mtx_spin }, 353 { "turnstile chain", &lock_class_mtx_spin }, 354 { "td_contested", &lock_class_mtx_spin }, 355 { "callout", &lock_class_mtx_spin }, 356 { "entropy harvest mutex", &lock_class_mtx_spin }, 357 /* 358 * leaf locks 359 */ 360 { "allpmaps", &lock_class_mtx_spin }, 361 { "vm page queue free mutex", &lock_class_mtx_spin }, 362 { "icu", &lock_class_mtx_spin }, 363#ifdef SMP 364 { "smp rendezvous", &lock_class_mtx_spin }, 365#if defined(__i386__) || defined(__amd64__) 366 { "tlb", &lock_class_mtx_spin }, 367#endif 368#ifdef __sparc64__ 369 { "ipi", &lock_class_mtx_spin }, 370#endif 371#endif 372 { "clk", &lock_class_mtx_spin }, 373 { "mutex profiling lock", &lock_class_mtx_spin }, 374 { "kse zombie lock", &lock_class_mtx_spin }, 375 { "ALD Queue", &lock_class_mtx_spin }, 376#ifdef __ia64__ 377 { "MCA spin lock", &lock_class_mtx_spin }, 378#endif 379#if defined(__i386__) || defined(__amd64__) 380 { "pcicfg", &lock_class_mtx_spin }, 381 { "NDIS thread lock", &lock_class_mtx_spin }, 382#endif 383 { NULL, NULL }, 384 { NULL, NULL } 385}; 386 387#ifdef BLESSING 388/* 389 * Pairs of locks which have been blessed 390 * Don't complain about order problems with blessed locks 391 */ 392static struct witness_blessed blessed_list[] = { 393}; 394static int blessed_count = 395 sizeof(blessed_list) / sizeof(struct witness_blessed); 396#endif 397 398/* 399 * List of all locks in the system. 400 */ 401TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks); 402 403static struct mtx all_mtx = { 404 { &lock_class_mtx_sleep, /* mtx_object.lo_class */ 405 "All locks list", /* mtx_object.lo_name */ 406 "All locks list", /* mtx_object.lo_type */ 407 LO_INITIALIZED, /* mtx_object.lo_flags */ 408 { NULL, NULL }, /* mtx_object.lo_list */ 409 NULL }, /* mtx_object.lo_witness */ 410 MTX_UNOWNED, 0 /* mtx_lock, mtx_recurse */ 411}; 412 413/* 414 * This global is set to 0 once it becomes safe to use the witness code. 415 */ 416static int witness_cold = 1; 417 418/* 419 * Global variables for book keeping. 420 */ 421static int lock_cur_cnt; 422static int lock_max_cnt; 423 424/* 425 * The WITNESS-enabled diagnostic code. 426 */ 427static void 428witness_initialize(void *dummy __unused) 429{ 430 struct lock_object *lock; 431 struct witness_order_list_entry *order; 432 struct witness *w, *w1; 433 int i; 434 435 /* 436 * We have to release Giant before initializing its witness 437 * structure so that WITNESS doesn't get confused. 438 */ 439 mtx_unlock(&Giant); 440 mtx_assert(&Giant, MA_NOTOWNED); 441 442 CTR1(KTR_WITNESS, "%s: initializing witness", __func__); 443 TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list); 444 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET | 445 MTX_NOWITNESS); 446 for (i = 0; i < WITNESS_COUNT; i++) 447 witness_free(&w_data[i]); 448 for (i = 0; i < WITNESS_CHILDCOUNT; i++) 449 witness_child_free(&w_childdata[i]); 450 for (i = 0; i < LOCK_CHILDCOUNT; i++) 451 witness_lock_list_free(&w_locklistdata[i]); 452 453 /* First add in all the specified order lists. 
#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of all locks in the system.
 */
TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);

static struct mtx all_mtx = {
	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
	  "All locks list",		/* mtx_object.lo_name */
	  "All locks list",		/* mtx_object.lo_type */
	  LO_INITIALIZED,		/* mtx_object.lo_flags */
	  { NULL, NULL },		/* mtx_object.lo_list */
	  NULL },			/* mtx_object.lo_witness */
	MTX_UNOWNED, 0			/* mtx_lock, mtx_recurse */
};

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * Global variables for book keeping.
 */
static int lock_cur_cnt;
static int lock_max_cnt;

/*
 * The WITNESS-enabled diagnostic code.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	error = suser(req->td);
	if (error != 0)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	class = lock->lo_class;
	if (lock->lo_flags & LO_INITIALIZED)
		panic("%s: lock (%s) %s is already initialized", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	mtx_lock(&all_mtx);
	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
	lock->lo_flags |= LO_INITIALIZED;
	lock_cur_cnt++;
	if (lock_cur_cnt > lock_max_cnt)
		lock_max_cnt = lock_cur_cnt;
	mtx_unlock(&all_mtx);
	if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
	    (lock->lo_flags & LO_WITNESS) != 0)
		lock->lo_witness = enroll(lock->lo_type, class);
	else
		lock->lo_witness = NULL;
}

void
witness_destroy(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    lock->lo_class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_INITIALIZED) == 0)
		panic("%s: lock (%s) %s is not initialized", __func__,
		    lock->lo_class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	w = lock->lo_witness;
	if (w != NULL) {
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#ifdef DDB
static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
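/*
 * Illustrative sketch (not part of the original file): a caller would use
 * witness_defineorder() to record an explicit "lock1 before lock2" order
 * between two witnessed locks.  The mutexes named here are hypothetical.
 */
#if 0
	error = witness_defineorder(&parent_mtx.mtx_object,
	    &child_mtx.mtx_object);
	if (error == EDOOFUS)
		printf("conflicting lock order already recorded\n");
#endif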
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we have a known higher number just say ok
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmydescendant(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				/*
				 * Check the lock order hierarchy for a
				 * reversal.
				 */
				if (!isitmydescendant(w, w1))
					continue;
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(__func__);
#endif
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (lock->lo_class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spin lock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}
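/*
 * Illustrative sketch (not part of the original file): callers typically
 * reach witness_warn() through the WITNESS_WARN() wrapper macro, e.g.
 * before an operation that may sleep.  The message string is hypothetical.
 */
#if 0
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Sleeping in \"%s\"", wmesg);
#endif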
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	/*
	 * This isn't quite right, as witness_cold is still 0 while we
	 * enroll all the locks initialized before witness_initialize().
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
		mtx_unlock_spin(&w_mtx);
		panic("spin lock %s not in order list", description);
	}
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK)
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
	else if (lock_class->lc_flags & LC_SLEEPLOCK)
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
	else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we reparent all the direct
	 * children of the outgoing witness to its parent.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
		if (!reparentchildren(parent, w))
			return (0);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	/* Finally, fixup the tree. */
	return (rebalancetree(list));
}

/*
 * Prune an entire lock order tree.  We look for cases where a lock
 * is now both a descendant and a direct child of a given lock.  In
 * that case, we want to remove the direct child link from the tree.
 *
 * Returns false if insertchild() fails.
 */
static int
rebalancetree(struct witness_list *list)
{
	struct witness *child, *parent;

	STAILQ_FOREACH(child, list, w_typelist) {
		STAILQ_FOREACH(parent, list, w_typelist) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			if (!insertchild(parent, child))
				return (0);
		}
	}
	witness_levelall();
	return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Insert "child" after "parent"
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

/*
 * Make all the direct descendants of oldparent be direct descendants
 * of newparent.
 */
static int
reparentchildren(struct witness *newparent, struct witness *oldparent)
{
	struct witness_child_list_entry *wcl;
	int i;

	/* Avoid making a witness a child of itself. */
	MPASS(!isitmychild(oldparent, newparent));

	for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			if (!insertchild(newparent, wcl->wcl_children[i]))
				return (0);
	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	if (!insertchild(parent, child))
		return (0);

	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	return (rebalancetree(list));
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization, technically we could get
		 * away with just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
}

static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
	if (lock->lo_type != lock->lo_name)
		printf(" (%s)", lock->lo_type);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}
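/*
 * Illustrative note (not part of the original file): following the printf
 * format in witness_list_lock() above, a reported lock instance looks
 * roughly like this (the lock name, type, address, and location are
 * hypothetical):
 *
 *	exclusive sleep mutex example lock (example lock type) r = 0
 *	    (0xc1234567) locked @ kern/kern_example.c:123
 */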
/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}
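/*
 * Illustrative note (not part of the original file): witness_assert()
 * below is normally reached through a lock class's assertion macro rather
 * than called directly.  For example, a shared-ownership check such as
 * sx_assert(&example_sx, SX_LOCKED) (hypothetical lock) can only be
 * answered from the per-thread lock lists that witness maintains.
 */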
lock->lo_name, file, line); 1872 if ((flags & LA_RECURSED) != 0 && 1873 (instance->li_flags & LI_RECURSEMASK) == 0) 1874 panic("Lock (%s) %s not recursed @ %s:%d.", 1875 lock->lo_class->lc_name, lock->lo_name, file, line); 1876 if ((flags & LA_NOTRECURSED) != 0 && 1877 (instance->li_flags & LI_RECURSEMASK) != 0) 1878 panic("Lock (%s) %s recursed @ %s:%d.", 1879 lock->lo_class->lc_name, lock->lo_name, file, line); 1880 break; 1881 default: 1882 panic("Invalid lock assertion at %s:%d.", file, line); 1883 1884 } 1885#endif /* INVARIANT_SUPPORT */ 1886} 1887 1888#ifdef DDB 1889static void 1890witness_list(struct thread *td) 1891{ 1892 1893 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1894 KASSERT(kdb_active, ("%s: not in the debugger", __func__)); 1895 1896 if (witness_watch == 0) 1897 return; 1898 1899 witness_list_locks(&td->td_sleeplocks); 1900 1901 /* 1902 * We only handle spinlocks if td == curthread. This is somewhat broken 1903 * if td is currently executing on some other CPU and holds spin locks 1904 * as we won't display those locks. If we had a MI way of getting 1905 * the per-cpu data for a given cpu then we could use 1906 * td->td_oncpu to get the list of spinlocks for this thread 1907 * and "fix" this. 1908 * 1909 * That still wouldn't really fix this unless we locked sched_lock 1910 * or stopped the other CPU to make sure it wasn't changing the list 1911 * out from under us. It is probably best to just not try to handle 1912 * threads on other CPU's for now. 1913 */ 1914 if (td == curthread && PCPU_GET(spinlocks) != NULL) 1915 witness_list_locks(PCPU_PTR(spinlocks)); 1916} 1917 1918DB_SHOW_COMMAND(locks, db_witness_list) 1919{ 1920 struct thread *td; 1921 pid_t pid; 1922 struct proc *p; 1923 1924 if (have_addr) { 1925 pid = (addr % 16) + ((addr >> 4) % 16) * 10 + 1926 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 + 1927 ((addr >> 16) % 16) * 10000; 1928 /* sx_slock(&allproc_lock); */ 1929 FOREACH_PROC_IN_SYSTEM(p) { 1930 if (p->p_pid == pid) 1931 break; 1932 } 1933 /* sx_sunlock(&allproc_lock); */ 1934 if (p == NULL) { 1935 db_printf("pid %d not found\n", pid); 1936 return; 1937 } 1938 FOREACH_THREAD_IN_PROC(p, td) { 1939 witness_list(td); 1940 } 1941 } else { 1942 td = curthread; 1943 witness_list(td); 1944 } 1945} 1946 1947DB_SHOW_COMMAND(alllocks, db_witness_list_all) 1948{ 1949 struct thread *td; 1950 struct proc *p; 1951 1952 /* 1953 * It would be nice to list only threads and processes that actually 1954 * held sleep locks, but that information is currently not exported 1955 * by WITNESS. 1956 */ 1957 FOREACH_PROC_IN_SYSTEM(p) { 1958 if (!witness_proc_has_locks(p)) 1959 continue; 1960 FOREACH_THREAD_IN_PROC(p, td) { 1961 if (!witness_thread_has_locks(td)) 1962 continue; 1963 printf("Process %d (%s) thread %p (%d)\n", p->p_pid, 1964 p->p_comm, td, td->td_tid); 1965 witness_list(td); 1966 } 1967 } 1968} 1969 1970DB_SHOW_COMMAND(witness, db_witness_display) 1971{ 1972 1973 witness_display(db_printf); 1974} 1975#endif 1976