/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented
 * for mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
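
/*
 * Illustration of rules 2) and 3) (an added sketch; "example_sx" is a
 * hypothetical sx lock, not one defined in this file).  Acquiring Giant
 * around a sleepable lock is safe precisely because Giant is dropped for
 * a thread that blocks:
 *
 *	mtx_lock(&Giant);
 *	sx_xlock(&example_sx);		(may sleep; Giant is released
 *					 while blocked, per rule 2)
 *	sx_xunlock(&example_sx);
 *	mtx_unlock(&Giant);
 */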

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 149979 2005-09-11 07:57:06Z truckman $");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_CHILDCOUNT		((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN	6

struct witness_child_list_entry;

struct witness {
	const char	*w_name;
	struct lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct witness_child_list_entry *w_children;	/* Great evilness... */
	const char	*w_file;
	int		w_line;
	u_int		w_level;
	u_int		w_refcount;
	u_char		w_Giant_squawked:1;
	u_char		w_other_squawked:1;
	u_char		w_same_squawked:1;
	u_char		w_displayed:1;
};

struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;
	struct witness	*wcl_children[WITNESS_NCHILDREN];
	u_int		wcl_count;
};

STAILQ_HEAD(witness_list, witness);
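
/*
 * Layout note (added for clarity): w_children above points at a singly
 * linked chain of witness_child_list_entry blocks, each holding up to
 * WITNESS_NCHILDREN direct children:
 *
 *	w->w_children -> [ c0 .. c5 | wcl_next ] -> [ c6 .. | NULL ]
 *
 * insertchild() fills the first block with free space and pulls a new
 * block from the static pool when every block in the chain is full.
 */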

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_order_list_entry {
	const char	*w_name;
	struct lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
		    struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
		    struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to be
 * turned on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
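
/*
 * Usage note (illustrative): witness can be disabled at runtime from
 * userland with
 *
 *	sysctl debug.witness.watch=0
 *
 * Attempts to write any non-zero value back are rejected with EINVAL by
 * sysctl_debug_witness_watch() below, which is what makes disabling a
 * one-way operation; the write also requires superuser privileges.
 */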

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
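
/*
 * Note (added for clarity): the three static arrays above are the only
 * storage witness ever uses; witness_initialize() strings them onto the
 * w_free, w_child_free, and w_lock_list_free lists, and when a pool runs
 * dry the allocators below print "witness exhausted" and clear
 * witness_watch instead of calling into malloc(9).
 */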

static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ "allprison", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_mtx_sleep },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "sabtty", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "taskqueue_fast", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "ithread table lock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};
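
/*
 * Reading the table (added for clarity): each run of entries up to a
 * { NULL, NULL } terminator declares one ordering chain, and within a
 * chain every lock must be acquired before the locks listed after it.
 * A new static order would be added as another chain, e.g. with
 * hypothetical names:
 *
 *	{ "mydev softc", &lock_class_mtx_sleep },
 *	{ "mydev queue", &lock_class_mtx_sleep },
 *	{ NULL, NULL },
 *
 * witness_initialize() walks each chain and links adjacent entries with
 * itismychild().
 */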

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.  Witness does not complain
 * about order problems with blessed lock pairs.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
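
/*
 * Example entry (hypothetical; the list above intentionally ships
 * empty): a pair whose order reversals should be ignored would be
 * written as
 *
 *	{ "locknameA", "locknameB" },
 *
 * blessed() matches a pair by name in either order.
 */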

/*
 * List of all locks in the system.
 */
TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);

static struct mtx all_mtx = {
	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
	  "All locks list",		/* mtx_object.lo_name */
	  "All locks list",		/* mtx_object.lo_type */
	  LO_INITIALIZED,		/* mtx_object.lo_flags */
	  { NULL, NULL },		/* mtx_object.lo_list */
	  NULL },			/* mtx_object.lo_witness */
	MTX_UNOWNED, 0			/* mtx_lock, mtx_recurse */
};

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * Global variables for bookkeeping.
 */
static int lock_cur_cnt;
static int lock_max_cnt;

/*
 * The WITNESS-enabled diagnostic code.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	error = suser(req->td);
	if (error != 0)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	class = lock->lo_class;
	if (lock->lo_flags & LO_INITIALIZED)
		panic("%s: lock (%s) %s is already initialized", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	mtx_lock(&all_mtx);
	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
	lock->lo_flags |= LO_INITIALIZED;
	lock_cur_cnt++;
	if (lock_cur_cnt > lock_max_cnt)
		lock_max_cnt = lock_cur_cnt;
	mtx_unlock(&all_mtx);
	if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
	    (lock->lo_flags & LO_WITNESS) != 0)
		lock->lo_witness = enroll(lock->lo_type, class);
	else
		lock->lo_witness = NULL;
}
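
/*
 * Usage sketch (illustrative; "sc" and "mydev" are hypothetical): a
 * lock is enrolled with witness as a side effect of its ordinary
 * initialization, e.g.
 *
 *	mtx_init(&sc->sc_mtx, "mydev", NULL, MTX_DEF);
 *
 * leaves LO_WITNESS set in lo_flags, so witness_init() above enrolls
 * the lock, while initializing with MTX_NOWITNESS leaves lo_witness
 * NULL and exempts the lock from all checking.
 */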

void
witness_destroy(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    lock->lo_class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_INITIALIZED) == 0)
		panic("%s: lock (%s) %s is not initialized", __func__,
		    lock->lo_class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	w = lock->lo_witness;
	if (w != NULL) {
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#ifdef DDB
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization, technically we could get
		 * away with just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		/*
		 * w_mtx has already been dropped on the allocation-failure
		 * path inside witness_child_get(), so just return.
		 */
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
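
/*
 * Usage sketch (illustrative; "m1" and "m2" are hypothetical mutexes):
 * a subsystem that knows the relative order of two witnessed locks up
 * front can declare it explicitly rather than waiting for the first
 * acquisition to imply it:
 *
 *	error = witness_defineorder(&m1.mtx_object, &m2.mtx_object);
 *
 * EDOOFUS comes back if the reverse order is already known to witness,
 * and ENOMEM if the static child-list pool has been exhausted.
 */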

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(__func__);
#endif
}
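
/*
 * Sample console output (constructed from the printf formats above; the
 * names, addresses, and source locations are hypothetical):
 *
 *	lock order reversal
 *	 1st 0xc22724e0 bar (bar) @ /sys/dev/foo/foo.c:123
 *	 2nd 0xc2272540 baz (baz) @ /sys/dev/foo/foo.c:456
 *
 * followed, when KDB is compiled in and debug.witness.trace is set, by
 * a stack trace of the offending acquisition.
 */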

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (lock->lo_class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spinlock preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}
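
/*
 * Usage sketch (illustrative; the message string is hypothetical): the
 * sleep paths call this along the lines of
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping without a lock");
 *
 * which complains only about held non-sleepable locks other than Giant;
 * OR-ing in WARN_PANIC upgrades the complaint to a panic.
 */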

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	/*
	 * This isn't quite right, as witness_cold is still set while we
	 * enroll all the locks initialized before witness_initialize().
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
		mtx_unlock_spin(&w_mtx);
		panic("spin lock %s not in order list", description);
	}
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we remove the outgoing witness
	 * from the parent's child lists.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		w_child_cnt--;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Insert "child" after "parent"
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
		w_child_cnt++;
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	return (insertchild(parent, child));
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	w_child_cnt--;
	witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	w_child_free_cnt--;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
	w_child_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
	if (lock->lo_type != lock->lo_name)
		printf(" (%s)", lock->lo_type);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best.
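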
 * We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

/*
 * Usage sketch (illustrative; "m" is a hypothetical mutex): callers
 * that transiently drop and reacquire a sleep lock use the pair above
 * to preserve the recorded acquisition point across the gap:
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m->mtx_object, &file, &line);
 *	mtx_unlock(m);
 *	...
 *	mtx_lock(m);
 *	witness_restore(&m->mtx_object, file, line);
 */

void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    lock->lo_class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE)
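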
 == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}

/*
 * Usage sketch (illustrative; "m" is a hypothetical mutex): a caller
 * asserts ownership state by passing a lock_object and LA_* flags, e.g.
 *
 *	witness_assert(&m->mtx_object, LA_XLOCKED, __FILE__, __LINE__);
 *
 * with LA_RECURSED or LA_NOTRECURSED OR-ed in as handled by the switch
 * above.
 */

#ifdef DDB
static void
witness_list(struct thread *td)
{

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had an MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;
	pid_t pid;
	struct proc *p;

	if (have_addr) {
		/*
		 * ddb has already parsed the argument as a hex number;
		 * reinterpret its hex digits as the decimal digits of
		 * the pid.
		 */
		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
		    ((addr >> 16) % 16) * 10000;
		/* sx_slock(&allproc_lock); */
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_pid == pid)
				break;
		}
		/* sx_sunlock(&allproc_lock); */
		if (p == NULL) {
			db_printf("pid %d not found\n", pid);
			return;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			witness_list(td);
		}
	} else {
		td = curthread;
		witness_list(td);
	}
}

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_list(td);
		}
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif