subr_witness.c revision 154484
1/*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 */ 31 32/* 33 * Implementation of the `witness' lock verifier. Originally implemented for 34 * mutexes in BSD/OS. Extended to handle generic lock objects and lock 35 * classes in FreeBSD. 36 */ 37 38/* 39 * Main Entry: witness 40 * Pronunciation: 'wit-n&s 41 * Function: noun 42 * Etymology: Middle English witnesse, from Old English witnes knowledge, 43 * testimony, witness, from 2wit 44 * Date: before 12th century 45 * 1 : attestation of a fact or event : TESTIMONY 46 * 2 : one that gives evidence; specifically : one who testifies in 47 * a cause or before a judicial tribunal 48 * 3 : one asked to be present at a transaction so as to be able to 49 * testify to its having taken place 50 * 4 : one who has personal knowledge of something 51 * 5 a : something serving as evidence or proof : SIGN 52 * b : public affirmation by word or example of usually 53 * religious faith or conviction <the heroic witness to divine 54 * life -- Pilot> 55 * 6 capitalized : a member of the Jehovah's Witnesses 56 */ 57 58/* 59 * Special rules concerning Giant and lock orders: 60 * 61 * 1) Giant must be acquired before any other mutexes. Stated another way, 62 * no other mutex may be held when Giant is acquired. 63 * 64 * 2) Giant must be released when blocking on a sleepable lock. 65 * 66 * This rule is less obvious, but is a result of Giant providing the same 67 * semantics as spl(). Basically, when a thread sleeps, it must release 68 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule 69 * 2). 70 * 71 * 3) Giant may be acquired before or after sleepable locks. 72 * 73 * This rule is also not quite as obvious. Giant may be acquired after 74 * a sleepable lock because it is a non-sleepable lock and non-sleepable 75 * locks may always be acquired while holding a sleepable lock. 
The second 76 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose 77 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1 78 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and 79 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to 80 * execute. Thus, acquiring Giant both before and after a sleepable lock 81 * will not result in a lock order reversal. 82 */ 83 84#include <sys/cdefs.h> 85__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 154484 2006-01-17 16:55:17Z jhb $"); 86 87#include "opt_ddb.h" 88#include "opt_witness.h" 89 90#include <sys/param.h> 91#include <sys/bus.h> 92#include <sys/kdb.h> 93#include <sys/kernel.h> 94#include <sys/ktr.h> 95#include <sys/lock.h> 96#include <sys/malloc.h> 97#include <sys/mutex.h> 98#include <sys/proc.h> 99#include <sys/sysctl.h> 100#include <sys/systm.h> 101 102#include <ddb/ddb.h> 103 104#include <machine/stdarg.h> 105 106/* Easier to stay with the old names. */ 107#define lo_list lo_witness_data.lod_list 108#define lo_witness lo_witness_data.lod_witness 109 110/* Define this to check for blessed mutexes */ 111#undef BLESSING 112 113#define WITNESS_COUNT 1024 114#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4) 115/* 116 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads 117 * will hold LOCK_NCHILDREN * 2 locks. We handle failure ok, and we should 118 * probably be safe for the most part, but it's still a SWAG. 119 */ 120#define LOCK_CHILDCOUNT (MAXCPU + 1024) * 2 121 122#define WITNESS_NCHILDREN 6 123 124struct witness_child_list_entry; 125 126struct witness { 127 const char *w_name; 128 struct lock_class *w_class; 129 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */ 130 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */ 131 struct witness_child_list_entry *w_children; /* Great evilness... 
 */
	const char	*w_file;
	int		w_line;
	u_int		w_level;
	u_int		w_refcount;
	u_char		w_Giant_squawked:1;
	u_char		w_other_squawked:1;
	u_char		w_same_squawked:1;
	u_char		w_displayed:1;
};

struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;
	struct witness *wcl_children[WITNESS_NCHILDREN];
	u_int wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const char *b_lock1;
	const char *b_lock2;
};
#endif

struct witness_order_list_entry {
	const char *w_name;
	struct lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char	*fixup_filename(const char *file);
static struct witness	*witness_get(void);
static void	witness_free(struct witness *m);
static struct witness_child_list_entry	*witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct lock_list_entry	*witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_instance	*find_instance(struct lock_list_entry *lock_list,
				    struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to be
 * turned back on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
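 *
 * A minimal configuration sketch (example values only): witness_kdb is
 * exported below both as a loader tunable and as a read/write sysctl, so
 * it could be enabled at boot from /boot/loader.conf with
 *
 *	debug.witness.kdb=1
 *
 * or toggled at runtime, e.g. "sysctl debug.witness.kdb=1".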
214 */ 215#ifdef WITNESS_KDB 216int witness_kdb = 1; 217#else 218int witness_kdb = 0; 219#endif 220TUNABLE_INT("debug.witness.kdb", &witness_kdb); 221SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, ""); 222 223/* 224 * When KDB is enabled and witness_trace is set to 1, it will cause the system 225 * to print a stack trace: 226 * - a lock hierarchy violation occurs 227 * - locks are held when going to sleep. 228 */ 229int witness_trace = 1; 230TUNABLE_INT("debug.witness.trace", &witness_trace); 231SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, ""); 232#endif /* KDB */ 233 234#ifdef WITNESS_SKIPSPIN 235int witness_skipspin = 1; 236#else 237int witness_skipspin = 0; 238#endif 239TUNABLE_INT("debug.witness.skipspin", &witness_skipspin); 240SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, 241 &witness_skipspin, 0, ""); 242 243static struct mtx w_mtx; 244static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free); 245static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all); 246static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin); 247static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep); 248static struct witness_child_list_entry *w_child_free = NULL; 249static struct lock_list_entry *w_lock_list_free = NULL; 250 251static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt; 252SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, ""); 253SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, ""); 254SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0, 255 ""); 256SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD, 257 &w_child_free_cnt, 0, ""); 258SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0, 259 ""); 260 261static struct witness w_data[WITNESS_COUNT]; 262static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT]; 263static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT]; 264 265static struct witness_order_list_entry order_lists[] = { 266 /* 267 * sx locks 268 */ 269 { "proctree", &lock_class_sx }, 270 { "allproc", &lock_class_sx }, 271 { NULL, NULL }, 272 /* 273 * Various mutexes 274 */ 275 { "Giant", &lock_class_mtx_sleep }, 276 { "filedesc structure", &lock_class_mtx_sleep }, 277 { "pipe mutex", &lock_class_mtx_sleep }, 278 { "sigio lock", &lock_class_mtx_sleep }, 279 { "process group", &lock_class_mtx_sleep }, 280 { "process lock", &lock_class_mtx_sleep }, 281 { "session", &lock_class_mtx_sleep }, 282 { "uidinfo hash", &lock_class_mtx_sleep }, 283 { "uidinfo struct", &lock_class_mtx_sleep }, 284 { "allprison", &lock_class_mtx_sleep }, 285 { NULL, NULL }, 286 /* 287 * Sockets 288 */ 289 { "filedesc structure", &lock_class_mtx_sleep }, 290 { "accept", &lock_class_mtx_sleep }, 291 { "so_snd", &lock_class_mtx_sleep }, 292 { "so_rcv", &lock_class_mtx_sleep }, 293 { "sellck", &lock_class_mtx_sleep }, 294 { NULL, NULL }, 295 /* 296 * Routing 297 */ 298 { "so_rcv", &lock_class_mtx_sleep }, 299 { "radix node head", &lock_class_mtx_sleep }, 300 { "rtentry", &lock_class_mtx_sleep }, 301 { "ifaddr", &lock_class_mtx_sleep }, 302 { NULL, NULL }, 303 /* 304 * Multicast - protocol locks before interface locks, after UDP locks. 
305 */ 306 { "udpinp", &lock_class_mtx_sleep }, 307 { "in_multi_mtx", &lock_class_mtx_sleep }, 308 { "igmp_mtx", &lock_class_mtx_sleep }, 309 { "if_addr_mtx", &lock_class_mtx_sleep }, 310 { NULL, NULL }, 311 /* 312 * UNIX Domain Sockets 313 */ 314 { "unp", &lock_class_mtx_sleep }, 315 { "so_snd", &lock_class_mtx_sleep }, 316 { NULL, NULL }, 317 /* 318 * UDP/IP 319 */ 320 { "udp", &lock_class_mtx_sleep }, 321 { "udpinp", &lock_class_mtx_sleep }, 322 { "so_snd", &lock_class_mtx_sleep }, 323 { NULL, NULL }, 324 /* 325 * TCP/IP 326 */ 327 { "tcp", &lock_class_mtx_sleep }, 328 { "tcpinp", &lock_class_mtx_sleep }, 329 { "so_snd", &lock_class_mtx_sleep }, 330 { NULL, NULL }, 331 /* 332 * SLIP 333 */ 334 { "slip_mtx", &lock_class_mtx_sleep }, 335 { "slip sc_mtx", &lock_class_mtx_sleep }, 336 { NULL, NULL }, 337 /* 338 * netatalk 339 */ 340 { "ddp_list_mtx", &lock_class_mtx_sleep }, 341 { "ddp_mtx", &lock_class_mtx_sleep }, 342 { NULL, NULL }, 343 /* 344 * BPF 345 */ 346 { "bpf global lock", &lock_class_mtx_sleep }, 347 { "bpf interface lock", &lock_class_mtx_sleep }, 348 { "bpf cdev lock", &lock_class_mtx_sleep }, 349 { NULL, NULL }, 350 /* 351 * NFS server 352 */ 353 { "nfsd_mtx", &lock_class_mtx_sleep }, 354 { "so_snd", &lock_class_mtx_sleep }, 355 { NULL, NULL }, 356 /* 357 * CDEV 358 */ 359 { "system map", &lock_class_mtx_sleep }, 360 { "vm page queue mutex", &lock_class_mtx_sleep }, 361 { "vnode interlock", &lock_class_mtx_sleep }, 362 { "cdev", &lock_class_mtx_sleep }, 363 { NULL, NULL }, 364 /* 365 * spin locks 366 */ 367#ifdef SMP 368 { "ap boot", &lock_class_mtx_spin }, 369#endif 370 { "rm.mutex_mtx", &lock_class_mtx_spin }, 371 { "hptlock", &lock_class_mtx_spin }, 372 { "sio", &lock_class_mtx_spin }, 373#ifdef __i386__ 374 { "cy", &lock_class_mtx_spin }, 375#endif 376 { "uart_hwmtx", &lock_class_mtx_spin }, 377 { "sabtty", &lock_class_mtx_spin }, 378 { "zstty", &lock_class_mtx_spin }, 379 { "ng_node", &lock_class_mtx_spin }, 380 { "ng_worklist", &lock_class_mtx_spin }, 381 { "taskqueue_fast", &lock_class_mtx_spin }, 382 { "intr table", &lock_class_mtx_spin }, 383 { "sleepq chain", &lock_class_mtx_spin }, 384 { "sched lock", &lock_class_mtx_spin }, 385 { "turnstile chain", &lock_class_mtx_spin }, 386 { "td_contested", &lock_class_mtx_spin }, 387 { "callout", &lock_class_mtx_spin }, 388 { "entropy harvest mutex", &lock_class_mtx_spin }, 389 /* 390 * leaf locks 391 */ 392 { "allpmaps", &lock_class_mtx_spin }, 393 { "vm page queue free mutex", &lock_class_mtx_spin }, 394 { "icu", &lock_class_mtx_spin }, 395#ifdef SMP 396 { "smp rendezvous", &lock_class_mtx_spin }, 397#if defined(__i386__) || defined(__amd64__) 398 { "tlb", &lock_class_mtx_spin }, 399#endif 400#ifdef __sparc64__ 401 { "ipi", &lock_class_mtx_spin }, 402 { "rtc_mtx", &lock_class_mtx_spin }, 403#endif 404#endif 405 { "clk", &lock_class_mtx_spin }, 406 { "mutex profiling lock", &lock_class_mtx_spin }, 407 { "kse zombie lock", &lock_class_mtx_spin }, 408 { "ALD Queue", &lock_class_mtx_spin }, 409#ifdef __ia64__ 410 { "MCA spin lock", &lock_class_mtx_spin }, 411#endif 412#if defined(__i386__) || defined(__amd64__) 413 { "pcicfg", &lock_class_mtx_spin }, 414 { "NDIS thread lock", &lock_class_mtx_spin }, 415#endif 416 { "tw_osl_io_lock", &lock_class_mtx_spin }, 417 { "tw_osl_q_lock", &lock_class_mtx_spin }, 418 { "tw_cl_io_lock", &lock_class_mtx_spin }, 419 { "tw_cl_intr_lock", &lock_class_mtx_spin }, 420 { "tw_cl_gen_lock", &lock_class_mtx_spin }, 421 { NULL, NULL }, 422 { NULL, NULL } 423}; 424 425#ifdef BLESSING 426/* 427 * Pairs 
of locks which have been blessed 428 * Don't complain about order problems with blessed locks 429 */ 430static struct witness_blessed blessed_list[] = { 431}; 432static int blessed_count = 433 sizeof(blessed_list) / sizeof(struct witness_blessed); 434#endif 435 436/* 437 * List of locks initialized prior to witness being initialized whose 438 * enrollment is currently deferred. 439 */ 440STAILQ_HEAD(, lock_object) pending_locks = 441 STAILQ_HEAD_INITIALIZER(pending_locks); 442 443/* 444 * This global is set to 0 once it becomes safe to use the witness code. 445 */ 446static int witness_cold = 1; 447 448/* 449 * This global is set to 1 once the static lock orders have been enrolled 450 * so that a warning can be issued for any spin locks enrolled later. 451 */ 452static int witness_spin_warn = 0; 453 454/* 455 * The WITNESS-enabled diagnostic code. Note that the witness code does 456 * assume that the early boot is single-threaded at least until after this 457 * routine is completed. 458 */ 459static void 460witness_initialize(void *dummy __unused) 461{ 462 struct lock_object *lock; 463 struct witness_order_list_entry *order; 464 struct witness *w, *w1; 465 int i; 466 467 /* 468 * We have to release Giant before initializing its witness 469 * structure so that WITNESS doesn't get confused. 470 */ 471 mtx_unlock(&Giant); 472 mtx_assert(&Giant, MA_NOTOWNED); 473 474 CTR1(KTR_WITNESS, "%s: initializing witness", __func__); 475 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET | 476 MTX_NOWITNESS); 477 for (i = 0; i < WITNESS_COUNT; i++) 478 witness_free(&w_data[i]); 479 for (i = 0; i < WITNESS_CHILDCOUNT; i++) 480 witness_child_free(&w_childdata[i]); 481 for (i = 0; i < LOCK_CHILDCOUNT; i++) 482 witness_lock_list_free(&w_locklistdata[i]); 483 484 /* First add in all the specified order lists. */ 485 for (order = order_lists; order->w_name != NULL; order++) { 486 w = enroll(order->w_name, order->w_class); 487 if (w == NULL) 488 continue; 489 w->w_file = "order list"; 490 for (order++; order->w_name != NULL; order++) { 491 w1 = enroll(order->w_name, order->w_class); 492 if (w1 == NULL) 493 continue; 494 w1->w_file = "order list"; 495 if (!itismychild(w, w1)) 496 panic("Not enough memory for static orders!"); 497 w = w1; 498 } 499 } 500 witness_spin_warn = 1; 501 502 /* Iterate through all locks and add them to witness. */ 503 while (!STAILQ_EMPTY(&pending_locks)) { 504 lock = STAILQ_FIRST(&pending_locks); 505 STAILQ_REMOVE_HEAD(&pending_locks, lo_list); 506 KASSERT(lock->lo_flags & LO_WITNESS, 507 ("%s: lock %s is on pending list but not LO_WITNESS", 508 __func__, lock->lo_name)); 509 lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock)); 510 } 511 512 /* Mark the witness code as being ready for use. */ 513 witness_cold = 0; 514 515 mtx_lock(&Giant); 516} 517SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL) 518 519static int 520sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS) 521{ 522 int error, value; 523 524 value = witness_watch; 525 error = sysctl_handle_int(oidp, &value, 0, req); 526 if (error != 0 || req->newptr == NULL) 527 return (error); 528 error = suser(req->td); 529 if (error != 0) 530 return (error); 531 if (value == witness_watch) 532 return (0); 533 if (value != 0) 534 return (EINVAL); 535 witness_watch = 0; 536 return (0); 537} 538 539void 540witness_init(struct lock_object *lock) 541{ 542 struct lock_class *class; 543 544 /* Various sanity checks. 
*/ 545 class = LOCK_CLASS(lock); 546 if ((lock->lo_flags & LO_RECURSABLE) != 0 && 547 (class->lc_flags & LC_RECURSABLE) == 0) 548 panic("%s: lock (%s) %s can not be recursable", __func__, 549 class->lc_name, lock->lo_name); 550 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 551 (class->lc_flags & LC_SLEEPABLE) == 0) 552 panic("%s: lock (%s) %s can not be sleepable", __func__, 553 class->lc_name, lock->lo_name); 554 if ((lock->lo_flags & LO_UPGRADABLE) != 0 && 555 (class->lc_flags & LC_UPGRADABLE) == 0) 556 panic("%s: lock (%s) %s can not be upgradable", __func__, 557 class->lc_name, lock->lo_name); 558 559 /* 560 * If we shouldn't watch this lock, then just clear lo_witness. 561 * Otherwise, if witness_cold is set, then it is too early to 562 * enroll this lock, so defer it to witness_initialize() by adding 563 * it to the pending_locks list. If it is not too early, then enroll 564 * the lock now. 565 */ 566 if (witness_watch == 0 || panicstr != NULL || 567 (lock->lo_flags & LO_WITNESS) == 0) 568 lock->lo_witness = NULL; 569 else if (witness_cold) { 570 STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list); 571 lock->lo_flags |= LO_ENROLLPEND; 572 } else 573 lock->lo_witness = enroll(lock->lo_type, class); 574} 575 576void 577witness_destroy(struct lock_object *lock) 578{ 579 struct lock_class *class; 580 struct witness *w; 581 582 class = LOCK_CLASS(lock); 583 if (witness_cold) 584 panic("lock (%s) %s destroyed while witness_cold", 585 class->lc_name, lock->lo_name); 586 587 /* XXX: need to verify that no one holds the lock */ 588 if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS && 589 lock->lo_witness != NULL) { 590 w = lock->lo_witness; 591 mtx_lock_spin(&w_mtx); 592 MPASS(w->w_refcount > 0); 593 w->w_refcount--; 594 595 /* 596 * Lock is already released if we have an allocation failure 597 * and depart() fails. 598 */ 599 if (w->w_refcount != 0 || depart(w)) 600 mtx_unlock_spin(&w_mtx); 601 } 602 603 /* 604 * If this lock is destroyed before witness is up and running, 605 * remove it from the pending list. 606 */ 607 if (lock->lo_flags & LO_ENROLLPEND) { 608 STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list); 609 lock->lo_flags &= ~LO_ENROLLPEND; 610 } 611} 612 613#ifdef DDB 614static void 615witness_levelall (void) 616{ 617 struct witness_list *list; 618 struct witness *w, *w1; 619 620 /* 621 * First clear all levels. 622 */ 623 STAILQ_FOREACH(w, &w_all, w_list) { 624 w->w_level = 0; 625 } 626 627 /* 628 * Look for locks with no parent and level all their descendants. 629 */ 630 STAILQ_FOREACH(w, &w_all, w_list) { 631 /* 632 * This is just an optimization, technically we could get 633 * away just walking the all list each time. 
634 */ 635 if (w->w_class->lc_flags & LC_SLEEPLOCK) 636 list = &w_sleep; 637 else 638 list = &w_spin; 639 STAILQ_FOREACH(w1, list, w_typelist) { 640 if (isitmychild(w1, w)) 641 goto skip; 642 } 643 witness_leveldescendents(w, 0); 644 skip: 645 ; /* silence GCC 3.x */ 646 } 647} 648 649static void 650witness_leveldescendents(struct witness *parent, int level) 651{ 652 struct witness_child_list_entry *wcl; 653 int i; 654 655 if (parent->w_level < level) 656 parent->w_level = level; 657 level++; 658 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 659 for (i = 0; i < wcl->wcl_count; i++) 660 witness_leveldescendents(wcl->wcl_children[i], level); 661} 662 663static void 664witness_displaydescendants(void(*prnt)(const char *fmt, ...), 665 struct witness *parent, int indent) 666{ 667 struct witness_child_list_entry *wcl; 668 int i, level; 669 670 level = parent->w_level; 671 prnt("%-2d", level); 672 for (i = 0; i < indent; i++) 673 prnt(" "); 674 if (parent->w_refcount > 0) 675 prnt("%s", parent->w_name); 676 else 677 prnt("(dead)"); 678 if (parent->w_displayed) { 679 prnt(" -- (already displayed)\n"); 680 return; 681 } 682 parent->w_displayed = 1; 683 if (parent->w_refcount > 0) { 684 if (parent->w_file != NULL) 685 prnt(" -- last acquired @ %s:%d", parent->w_file, 686 parent->w_line); 687 } 688 prnt("\n"); 689 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 690 for (i = 0; i < wcl->wcl_count; i++) 691 witness_displaydescendants(prnt, 692 wcl->wcl_children[i], indent + 1); 693} 694 695static void 696witness_display_list(void(*prnt)(const char *fmt, ...), 697 struct witness_list *list) 698{ 699 struct witness *w; 700 701 STAILQ_FOREACH(w, list, w_typelist) { 702 if (w->w_file == NULL || w->w_level > 0) 703 continue; 704 /* 705 * This lock has no anscestors, display its descendants. 706 */ 707 witness_displaydescendants(prnt, w, 0); 708 } 709} 710 711static void 712witness_display(void(*prnt)(const char *fmt, ...)) 713{ 714 struct witness *w; 715 716 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 717 witness_levelall(); 718 719 /* Clear all the displayed flags. */ 720 STAILQ_FOREACH(w, &w_all, w_list) { 721 w->w_displayed = 0; 722 } 723 724 /* 725 * First, handle sleep locks which have been acquired at least 726 * once. 727 */ 728 prnt("Sleep locks:\n"); 729 witness_display_list(prnt, &w_sleep); 730 731 /* 732 * Now do spin locks which have been acquired at least once. 733 */ 734 prnt("\nSpin locks:\n"); 735 witness_display_list(prnt, &w_spin); 736 737 /* 738 * Finally, any locks which have not been acquired yet. 739 */ 740 prnt("\nLocks which were never acquired:\n"); 741 STAILQ_FOREACH(w, &w_all, w_list) { 742 if (w->w_file != NULL || w->w_refcount == 0) 743 continue; 744 prnt("%s\n", w->w_name); 745 } 746} 747#endif /* DDB */ 748 749/* Trim useless garbage from filenames. */ 750static const char * 751fixup_filename(const char *file) 752{ 753 754 if (file == NULL) 755 return (NULL); 756 while (strncmp(file, "../", 3) == 0) 757 file += 3; 758 return (file); 759} 760 761int 762witness_defineorder(struct lock_object *lock1, struct lock_object *lock2) 763{ 764 765 if (witness_watch == 0 || panicstr != NULL) 766 return (0); 767 768 /* Require locks that witness knows about. 
*/ 769 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL || 770 lock2->lo_witness == NULL) 771 return (EINVAL); 772 773 MPASS(!mtx_owned(&w_mtx)); 774 mtx_lock_spin(&w_mtx); 775 776 /* 777 * If we already have either an explicit or implied lock order that 778 * is the other way around, then return an error. 779 */ 780 if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) { 781 mtx_unlock_spin(&w_mtx); 782 return (EDOOFUS); 783 } 784 785 /* Try to add the new order. */ 786 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 787 lock2->lo_type, lock1->lo_type); 788 if (!itismychild(lock1->lo_witness, lock2->lo_witness)) 789 return (ENOMEM); 790 mtx_unlock_spin(&w_mtx); 791 return (0); 792} 793 794void 795witness_checkorder(struct lock_object *lock, int flags, const char *file, 796 int line) 797{ 798 struct lock_list_entry **lock_list, *lle; 799 struct lock_instance *lock1, *lock2; 800 struct lock_class *class; 801 struct witness *w, *w1; 802 struct thread *td; 803 int i, j; 804 805 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 806 panicstr != NULL) 807 return; 808 809 /* 810 * Try locks do not block if they fail to acquire the lock, thus 811 * there is no danger of deadlocks or of switching while holding a 812 * spin lock if we acquire a lock via a try operation. This 813 * function shouldn't even be called for try locks, so panic if 814 * that happens. 815 */ 816 if (flags & LOP_TRYLOCK) 817 panic("%s should not be called for try lock operations", 818 __func__); 819 820 w = lock->lo_witness; 821 class = LOCK_CLASS(lock); 822 td = curthread; 823 file = fixup_filename(file); 824 825 if (class->lc_flags & LC_SLEEPLOCK) { 826 /* 827 * Since spin locks include a critical section, this check 828 * implicitly enforces a lock order of all sleep locks before 829 * all spin locks. 830 */ 831 if (td->td_critnest != 0 && !kdb_active) 832 panic("blockable sleep lock (%s) %s @ %s:%d", 833 class->lc_name, lock->lo_name, file, line); 834 835 /* 836 * If this is the first lock acquired then just return as 837 * no order checking is needed. 838 */ 839 if (td->td_sleeplocks == NULL) 840 return; 841 lock_list = &td->td_sleeplocks; 842 } else { 843 /* 844 * If this is the first lock, just return as no order 845 * checking is needed. We check this in both if clauses 846 * here as unifying the check would require us to use a 847 * critical section to ensure we don't migrate while doing 848 * the check. Note that if this is not the first lock, we 849 * are already in a critical section and are safe for the 850 * rest of the check. 851 */ 852 if (PCPU_GET(spinlocks) == NULL) 853 return; 854 lock_list = PCPU_PTR(spinlocks); 855 } 856 857 /* 858 * Check to see if we are recursing on a lock we already own. If 859 * so, make sure that we don't mismatch exclusive and shared lock 860 * acquires. 
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.mtx_object)
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
973 */ 974 if (!isitmydescendant(w, w1)) 975 continue; 976 reversal: 977 /* 978 * We have a lock order violation, check to see if it 979 * is allowed or has already been yelled about. 980 */ 981 mtx_unlock_spin(&w_mtx); 982#ifdef BLESSING 983 /* 984 * If the lock order is blessed, just bail. We don't 985 * look for other lock order violations though, which 986 * may be a bug. 987 */ 988 if (blessed(w, w1)) 989 return; 990#endif 991 if (lock1->li_lock == &Giant.mtx_object) { 992 if (w1->w_Giant_squawked) 993 return; 994 else 995 w1->w_Giant_squawked = 1; 996 } else { 997 if (w1->w_other_squawked) 998 return; 999 else 1000 w1->w_other_squawked = 1; 1001 } 1002 /* 1003 * Ok, yell about it. 1004 */ 1005 if (((lock->lo_flags & LO_SLEEPABLE) != 0 && 1006 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0)) 1007 printf( 1008 "lock order reversal: (sleepable after non-sleepable)\n"); 1009 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 1010 && lock == &Giant.mtx_object) 1011 printf( 1012 "lock order reversal: (Giant after non-sleepable)\n"); 1013 else 1014 printf("lock order reversal:\n"); 1015 /* 1016 * Try to locate an earlier lock with 1017 * witness w in our list. 1018 */ 1019 do { 1020 lock2 = &lle->ll_children[i]; 1021 MPASS(lock2->li_lock != NULL); 1022 if (lock2->li_lock->lo_witness == w) 1023 break; 1024 if (i == 0 && lle->ll_next != NULL) { 1025 lle = lle->ll_next; 1026 i = lle->ll_count - 1; 1027 MPASS(i >= 0 && i < LOCK_NCHILDREN); 1028 } else 1029 i--; 1030 } while (i >= 0); 1031 if (i < 0) { 1032 printf(" 1st %p %s (%s) @ %s:%d\n", 1033 lock1->li_lock, lock1->li_lock->lo_name, 1034 lock1->li_lock->lo_type, lock1->li_file, 1035 lock1->li_line); 1036 printf(" 2nd %p %s (%s) @ %s:%d\n", lock, 1037 lock->lo_name, lock->lo_type, file, line); 1038 } else { 1039 printf(" 1st %p %s (%s) @ %s:%d\n", 1040 lock2->li_lock, lock2->li_lock->lo_name, 1041 lock2->li_lock->lo_type, lock2->li_file, 1042 lock2->li_line); 1043 printf(" 2nd %p %s (%s) @ %s:%d\n", 1044 lock1->li_lock, lock1->li_lock->lo_name, 1045 lock1->li_lock->lo_type, lock1->li_file, 1046 lock1->li_line); 1047 printf(" 3rd %p %s (%s) @ %s:%d\n", lock, 1048 lock->lo_name, lock->lo_type, file, line); 1049 } 1050#ifdef KDB 1051 goto debugger; 1052#else 1053 return; 1054#endif 1055 } 1056 } 1057 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1]; 1058 /* 1059 * If requested, build a new lock order. However, don't build a new 1060 * relationship between a sleepable lock and Giant if it is in the 1061 * wrong direction. The correct lock order is that sleepable locks 1062 * always come before Giant. 1063 */ 1064 if (flags & LOP_NEWORDER && 1065 !(lock1->li_lock == &Giant.mtx_object && 1066 (lock->lo_flags & LO_SLEEPABLE) != 0)) { 1067 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 1068 lock->lo_type, lock1->li_lock->lo_type); 1069 if (!itismychild(lock1->li_lock->lo_witness, w)) 1070 /* Witness is dead. 
*/ 1071 return; 1072 } 1073 mtx_unlock_spin(&w_mtx); 1074 return; 1075 1076#ifdef KDB 1077debugger: 1078 if (witness_trace) 1079 kdb_backtrace(); 1080 if (witness_kdb) 1081 kdb_enter(__func__); 1082#endif 1083} 1084 1085void 1086witness_lock(struct lock_object *lock, int flags, const char *file, int line) 1087{ 1088 struct lock_list_entry **lock_list, *lle; 1089 struct lock_instance *instance; 1090 struct witness *w; 1091 struct thread *td; 1092 1093 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 1094 panicstr != NULL) 1095 return; 1096 w = lock->lo_witness; 1097 td = curthread; 1098 file = fixup_filename(file); 1099 1100 /* Determine lock list for this lock. */ 1101 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK) 1102 lock_list = &td->td_sleeplocks; 1103 else 1104 lock_list = PCPU_PTR(spinlocks); 1105 1106 /* Check to see if we are recursing on a lock we already own. */ 1107 instance = find_instance(*lock_list, lock); 1108 if (instance != NULL) { 1109 instance->li_flags++; 1110 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__, 1111 td->td_proc->p_pid, lock->lo_name, 1112 instance->li_flags & LI_RECURSEMASK); 1113 instance->li_file = file; 1114 instance->li_line = line; 1115 return; 1116 } 1117 1118 /* Update per-witness last file and line acquire. */ 1119 w->w_file = file; 1120 w->w_line = line; 1121 1122 /* Find the next open lock instance in the list and fill it. */ 1123 lle = *lock_list; 1124 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) { 1125 lle = witness_lock_list_get(); 1126 if (lle == NULL) 1127 return; 1128 lle->ll_next = *lock_list; 1129 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__, 1130 td->td_proc->p_pid, lle); 1131 *lock_list = lle; 1132 } 1133 instance = &lle->ll_children[lle->ll_count++]; 1134 instance->li_lock = lock; 1135 instance->li_line = line; 1136 instance->li_file = file; 1137 if ((flags & LOP_EXCLUSIVE) != 0) 1138 instance->li_flags = LI_EXCLUSIVE; 1139 else 1140 instance->li_flags = 0; 1141 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__, 1142 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1); 1143} 1144 1145void 1146witness_upgrade(struct lock_object *lock, int flags, const char *file, int line) 1147{ 1148 struct lock_instance *instance; 1149 struct lock_class *class; 1150 1151 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1152 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1153 return; 1154 class = LOCK_CLASS(lock); 1155 file = fixup_filename(file); 1156 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1157 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d", 1158 class->lc_name, lock->lo_name, file, line); 1159 if ((flags & LOP_TRYLOCK) == 0) 1160 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name, 1161 lock->lo_name, file, line); 1162 if ((class->lc_flags & LC_SLEEPLOCK) == 0) 1163 panic("upgrade of non-sleep lock (%s) %s @ %s:%d", 1164 class->lc_name, lock->lo_name, file, line); 1165 instance = find_instance(curthread->td_sleeplocks, lock); 1166 if (instance == NULL) 1167 panic("upgrade of unlocked lock (%s) %s @ %s:%d", 1168 class->lc_name, lock->lo_name, file, line); 1169 if ((instance->li_flags & LI_EXCLUSIVE) != 0) 1170 panic("upgrade of exclusive lock (%s) %s @ %s:%d", 1171 class->lc_name, lock->lo_name, file, line); 1172 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1173 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d", 1174 class->lc_name, lock->lo_name, 1175 instance->li_flags & LI_RECURSEMASK, file, line); 1176 instance->li_flags |= 
LI_EXCLUSIVE; 1177} 1178 1179void 1180witness_downgrade(struct lock_object *lock, int flags, const char *file, 1181 int line) 1182{ 1183 struct lock_instance *instance; 1184 struct lock_class *class; 1185 1186 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1187 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1188 return; 1189 class = LOCK_CLASS(lock); 1190 file = fixup_filename(file); 1191 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1192 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d", 1193 class->lc_name, lock->lo_name, file, line); 1194 if ((class->lc_flags & LC_SLEEPLOCK) == 0) 1195 panic("downgrade of non-sleep lock (%s) %s @ %s:%d", 1196 class->lc_name, lock->lo_name, file, line); 1197 instance = find_instance(curthread->td_sleeplocks, lock); 1198 if (instance == NULL) 1199 panic("downgrade of unlocked lock (%s) %s @ %s:%d", 1200 class->lc_name, lock->lo_name, file, line); 1201 if ((instance->li_flags & LI_EXCLUSIVE) == 0) 1202 panic("downgrade of shared lock (%s) %s @ %s:%d", 1203 class->lc_name, lock->lo_name, file, line); 1204 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1205 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d", 1206 class->lc_name, lock->lo_name, 1207 instance->li_flags & LI_RECURSEMASK, file, line); 1208 instance->li_flags &= ~LI_EXCLUSIVE; 1209} 1210 1211void 1212witness_unlock(struct lock_object *lock, int flags, const char *file, int line) 1213{ 1214 struct lock_list_entry **lock_list, *lle; 1215 struct lock_instance *instance; 1216 struct lock_class *class; 1217 struct thread *td; 1218 register_t s; 1219 int i, j; 1220 1221 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL || 1222 panicstr != NULL) 1223 return; 1224 td = curthread; 1225 class = LOCK_CLASS(lock); 1226 file = fixup_filename(file); 1227 1228 /* Find lock instance associated with this lock. */ 1229 if (class->lc_flags & LC_SLEEPLOCK) 1230 lock_list = &td->td_sleeplocks; 1231 else 1232 lock_list = PCPU_PTR(spinlocks); 1233 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next) 1234 for (i = 0; i < (*lock_list)->ll_count; i++) { 1235 instance = &(*lock_list)->ll_children[i]; 1236 if (instance->li_lock == lock) 1237 goto found; 1238 } 1239 panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name, 1240 file, line); 1241found: 1242 1243 /* First, check for shared/exclusive mismatches. */ 1244 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && 1245 (flags & LOP_EXCLUSIVE) == 0) { 1246 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name, 1247 lock->lo_name, file, line); 1248 printf("while exclusively locked from %s:%d\n", 1249 instance->li_file, instance->li_line); 1250 panic("excl->ushare"); 1251 } 1252 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && 1253 (flags & LOP_EXCLUSIVE) != 0) { 1254 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name, 1255 lock->lo_name, file, line); 1256 printf("while share locked from %s:%d\n", instance->li_file, 1257 instance->li_line); 1258 panic("share->uexcl"); 1259 } 1260 1261 /* If we are recursed, unrecurse. */ 1262 if ((instance->li_flags & LI_RECURSEMASK) > 0) { 1263 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__, 1264 td->td_proc->p_pid, instance->li_lock->lo_name, 1265 instance->li_flags); 1266 instance->li_flags--; 1267 return; 1268 } 1269 1270 /* Otherwise, remove this item from the list. 
*/ 1271 s = intr_disable(); 1272 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__, 1273 td->td_proc->p_pid, instance->li_lock->lo_name, 1274 (*lock_list)->ll_count - 1); 1275 for (j = i; j < (*lock_list)->ll_count - 1; j++) 1276 (*lock_list)->ll_children[j] = 1277 (*lock_list)->ll_children[j + 1]; 1278 (*lock_list)->ll_count--; 1279 intr_restore(s); 1280 1281 /* If this lock list entry is now empty, free it. */ 1282 if ((*lock_list)->ll_count == 0) { 1283 lle = *lock_list; 1284 *lock_list = lle->ll_next; 1285 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__, 1286 td->td_proc->p_pid, lle); 1287 witness_lock_list_free(lle); 1288 } 1289} 1290 1291/* 1292 * Warn if any locks other than 'lock' are held. Flags can be passed in to 1293 * exempt Giant and sleepable locks from the checks as well. If any 1294 * non-exempt locks are held, then a supplied message is printed to the 1295 * console along with a list of the offending locks. If indicated in the 1296 * flags then a failure results in a panic as well. 1297 */ 1298int 1299witness_warn(int flags, struct lock_object *lock, const char *fmt, ...) 1300{ 1301 struct lock_list_entry *lle; 1302 struct lock_instance *lock1; 1303 struct thread *td; 1304 va_list ap; 1305 int i, n; 1306 1307 if (witness_cold || witness_watch == 0 || panicstr != NULL) 1308 return (0); 1309 n = 0; 1310 td = curthread; 1311 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next) 1312 for (i = lle->ll_count - 1; i >= 0; i--) { 1313 lock1 = &lle->ll_children[i]; 1314 if (lock1->li_lock == lock) 1315 continue; 1316 if (flags & WARN_GIANTOK && 1317 lock1->li_lock == &Giant.mtx_object) 1318 continue; 1319 if (flags & WARN_SLEEPOK && 1320 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) 1321 continue; 1322 if (n == 0) { 1323 va_start(ap, fmt); 1324 vprintf(fmt, ap); 1325 va_end(ap); 1326 printf(" with the following"); 1327 if (flags & WARN_SLEEPOK) 1328 printf(" non-sleepable"); 1329 printf(" locks held:\n"); 1330 } 1331 n++; 1332 witness_list_lock(lock1); 1333 } 1334 if (PCPU_GET(spinlocks) != NULL) { 1335 /* 1336 * Since we already hold a spinlock preemption is 1337 * already blocked. 
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		goto out;
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
out:
	/*
	 * We issue a warning for any spin locks not defined in the static
	 * order list as a way to discourage their use (folks should really
	 * be using non-spin mutexes most of the time).  However, several
	 * 3rd party device drivers use spin locks because that is all they
	 * have available on Windows and Linux and they think that normal
	 * mutexes are insufficient.
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
		printf("WITNESS: spin lock %s not in order list\n",
		    description);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we reparent all the direct
	 * children of the outgoing witness to its parent.
1460 */ 1461 STAILQ_FOREACH(parent, list, w_typelist) { 1462 if (!isitmychild(parent, w)) 1463 continue; 1464 removechild(parent, w); 1465 } 1466 1467 /* 1468 * Now we go through and free up the child list of the 1469 * outgoing witness. 1470 */ 1471 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) { 1472 nwcl = wcl->wcl_next; 1473 w_child_cnt--; 1474 witness_child_free(wcl); 1475 } 1476 1477 /* 1478 * Detach from various lists and free. 1479 */ 1480 STAILQ_REMOVE(list, w, witness, w_typelist); 1481 STAILQ_REMOVE(&w_all, w, witness, w_list); 1482 witness_free(w); 1483 1484 return (1); 1485} 1486 1487/* 1488 * Add "child" as a direct child of "parent". Returns false if 1489 * we fail due to out of memory. 1490 */ 1491static int 1492insertchild(struct witness *parent, struct witness *child) 1493{ 1494 struct witness_child_list_entry **wcl; 1495 1496 MPASS(child != NULL && parent != NULL); 1497 1498 /* 1499 * Insert "child" after "parent" 1500 */ 1501 wcl = &parent->w_children; 1502 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN) 1503 wcl = &(*wcl)->wcl_next; 1504 if (*wcl == NULL) { 1505 *wcl = witness_child_get(); 1506 if (*wcl == NULL) 1507 return (0); 1508 w_child_cnt++; 1509 } 1510 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child; 1511 1512 return (1); 1513} 1514 1515 1516static int 1517itismychild(struct witness *parent, struct witness *child) 1518{ 1519 struct witness_list *list; 1520 1521 MPASS(child != NULL && parent != NULL); 1522 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) != 1523 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK))) 1524 panic( 1525 "%s: parent (%s) and child (%s) are not the same lock type", 1526 __func__, parent->w_class->lc_name, 1527 child->w_class->lc_name); 1528 1529 if (!insertchild(parent, child)) 1530 return (0); 1531 1532 if (parent->w_class->lc_flags & LC_SLEEPLOCK) 1533 list = &w_sleep; 1534 else 1535 list = &w_spin; 1536 return (1); 1537} 1538 1539static void 1540removechild(struct witness *parent, struct witness *child) 1541{ 1542 struct witness_child_list_entry **wcl, *wcl1; 1543 int i; 1544 1545 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next) 1546 for (i = 0; i < (*wcl)->wcl_count; i++) 1547 if ((*wcl)->wcl_children[i] == child) 1548 goto found; 1549 return; 1550found: 1551 (*wcl)->wcl_count--; 1552 if ((*wcl)->wcl_count > i) 1553 (*wcl)->wcl_children[i] = 1554 (*wcl)->wcl_children[(*wcl)->wcl_count]; 1555 MPASS((*wcl)->wcl_children[i] != NULL); 1556 if ((*wcl)->wcl_count != 0) 1557 return; 1558 wcl1 = *wcl; 1559 *wcl = wcl1->wcl_next; 1560 w_child_cnt--; 1561 witness_child_free(wcl1); 1562} 1563 1564static int 1565isitmychild(struct witness *parent, struct witness *child) 1566{ 1567 struct witness_child_list_entry *wcl; 1568 int i; 1569 1570 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1571 for (i = 0; i < wcl->wcl_count; i++) { 1572 if (wcl->wcl_children[i] == child) 1573 return (1); 1574 } 1575 } 1576 return (0); 1577} 1578 1579static int 1580isitmydescendant(struct witness *parent, struct witness *child) 1581{ 1582 struct witness_child_list_entry *wcl; 1583 int i, j; 1584 1585 if (isitmychild(parent, child)) 1586 return (1); 1587 j = 0; 1588 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) { 1589 MPASS(j < 1000); 1590 for (i = 0; i < wcl->wcl_count; i++) { 1591 if (isitmydescendant(wcl->wcl_children[i], child)) 1592 return (1); 1593 } 1594 j++; 1595 } 1596 return (0); 1597} 1598 1599#ifdef BLESSING 1600static int 1601blessed(struct witness 
*w1, struct witness *w2) 1602{ 1603 int i; 1604 struct witness_blessed *b; 1605 1606 for (i = 0; i < blessed_count; i++) { 1607 b = &blessed_list[i]; 1608 if (strcmp(w1->w_name, b->b_lock1) == 0) { 1609 if (strcmp(w2->w_name, b->b_lock2) == 0) 1610 return (1); 1611 continue; 1612 } 1613 if (strcmp(w1->w_name, b->b_lock2) == 0) 1614 if (strcmp(w2->w_name, b->b_lock1) == 0) 1615 return (1); 1616 } 1617 return (0); 1618} 1619#endif 1620 1621static struct witness * 1622witness_get(void) 1623{ 1624 struct witness *w; 1625 1626 if (witness_watch == 0) { 1627 mtx_unlock_spin(&w_mtx); 1628 return (NULL); 1629 } 1630 if (STAILQ_EMPTY(&w_free)) { 1631 witness_watch = 0; 1632 mtx_unlock_spin(&w_mtx); 1633 printf("%s: witness exhausted\n", __func__); 1634 return (NULL); 1635 } 1636 w = STAILQ_FIRST(&w_free); 1637 STAILQ_REMOVE_HEAD(&w_free, w_list); 1638 w_free_cnt--; 1639 bzero(w, sizeof(*w)); 1640 return (w); 1641} 1642 1643static void 1644witness_free(struct witness *w) 1645{ 1646 1647 STAILQ_INSERT_HEAD(&w_free, w, w_list); 1648 w_free_cnt++; 1649} 1650 1651static struct witness_child_list_entry * 1652witness_child_get(void) 1653{ 1654 struct witness_child_list_entry *wcl; 1655 1656 if (witness_watch == 0) { 1657 mtx_unlock_spin(&w_mtx); 1658 return (NULL); 1659 } 1660 wcl = w_child_free; 1661 if (wcl == NULL) { 1662 witness_watch = 0; 1663 mtx_unlock_spin(&w_mtx); 1664 printf("%s: witness exhausted\n", __func__); 1665 return (NULL); 1666 } 1667 w_child_free = wcl->wcl_next; 1668 w_child_free_cnt--; 1669 bzero(wcl, sizeof(*wcl)); 1670 return (wcl); 1671} 1672 1673static void 1674witness_child_free(struct witness_child_list_entry *wcl) 1675{ 1676 1677 wcl->wcl_next = w_child_free; 1678 w_child_free = wcl; 1679 w_child_free_cnt++; 1680} 1681 1682static struct lock_list_entry * 1683witness_lock_list_get(void) 1684{ 1685 struct lock_list_entry *lle; 1686 1687 if (witness_watch == 0) 1688 return (NULL); 1689 mtx_lock_spin(&w_mtx); 1690 lle = w_lock_list_free; 1691 if (lle == NULL) { 1692 witness_watch = 0; 1693 mtx_unlock_spin(&w_mtx); 1694 printf("%s: witness exhausted\n", __func__); 1695 return (NULL); 1696 } 1697 w_lock_list_free = lle->ll_next; 1698 mtx_unlock_spin(&w_mtx); 1699 bzero(lle, sizeof(*lle)); 1700 return (lle); 1701} 1702 1703static void 1704witness_lock_list_free(struct lock_list_entry *lle) 1705{ 1706 1707 mtx_lock_spin(&w_mtx); 1708 lle->ll_next = w_lock_list_free; 1709 w_lock_list_free = lle; 1710 mtx_unlock_spin(&w_mtx); 1711} 1712 1713static struct lock_instance * 1714find_instance(struct lock_list_entry *lock_list, struct lock_object *lock) 1715{ 1716 struct lock_list_entry *lle; 1717 struct lock_instance *instance; 1718 int i; 1719 1720 for (lle = lock_list; lle != NULL; lle = lle->ll_next) 1721 for (i = lle->ll_count - 1; i >= 0; i--) { 1722 instance = &lle->ll_children[i]; 1723 if (instance->li_lock == lock) 1724 return (instance); 1725 } 1726 return (NULL); 1727} 1728 1729static void 1730witness_list_lock(struct lock_instance *instance) 1731{ 1732 struct lock_object *lock; 1733 1734 lock = instance->li_lock; 1735 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ? 
1736 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name); 1737 if (lock->lo_type != lock->lo_name) 1738 printf(" (%s)", lock->lo_type); 1739 printf(" r = %d (%p) locked @ %s:%d\n", 1740 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file, 1741 instance->li_line); 1742} 1743 1744#ifdef DDB 1745static int 1746witness_thread_has_locks(struct thread *td) 1747{ 1748 1749 return (td->td_sleeplocks != NULL); 1750} 1751 1752static int 1753witness_proc_has_locks(struct proc *p) 1754{ 1755 struct thread *td; 1756 1757 FOREACH_THREAD_IN_PROC(p, td) { 1758 if (witness_thread_has_locks(td)) 1759 return (1); 1760 } 1761 return (0); 1762} 1763#endif 1764 1765int 1766witness_list_locks(struct lock_list_entry **lock_list) 1767{ 1768 struct lock_list_entry *lle; 1769 int i, nheld; 1770 1771 nheld = 0; 1772 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 1773 for (i = lle->ll_count - 1; i >= 0; i--) { 1774 witness_list_lock(&lle->ll_children[i]); 1775 nheld++; 1776 } 1777 return (nheld); 1778} 1779 1780/* 1781 * This is a bit risky at best. We call this function when we have timed 1782 * out acquiring a spin lock, and we assume that the other CPU is stuck 1783 * with this lock held. So, we go groveling around in the other CPU's 1784 * per-cpu data to try to find the lock instance for this spin lock to 1785 * see when it was last acquired. 1786 */ 1787void 1788witness_display_spinlock(struct lock_object *lock, struct thread *owner) 1789{ 1790 struct lock_instance *instance; 1791 struct pcpu *pc; 1792 1793 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 1794 return; 1795 pc = pcpu_find(owner->td_oncpu); 1796 instance = find_instance(pc->pc_spinlocks, lock); 1797 if (instance != NULL) 1798 witness_list_lock(instance); 1799} 1800 1801void 1802witness_save(struct lock_object *lock, const char **filep, int *linep) 1803{ 1804 struct lock_list_entry *lock_list; 1805 struct lock_instance *instance; 1806 struct lock_class *class; 1807 1808 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1809 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1810 return; 1811 class = LOCK_CLASS(lock); 1812 if (class->lc_flags & LC_SLEEPLOCK) 1813 lock_list = curthread->td_sleeplocks; 1814 else { 1815 if (witness_skipspin) 1816 return; 1817 lock_list = PCPU_GET(spinlocks); 1818 } 1819 instance = find_instance(lock_list, lock); 1820 if (instance == NULL) 1821 panic("%s: lock (%s) %s not locked", __func__, 1822 class->lc_name, lock->lo_name); 1823 *filep = instance->li_file; 1824 *linep = instance->li_line; 1825} 1826 1827void 1828witness_restore(struct lock_object *lock, const char *file, int line) 1829{ 1830 struct lock_list_entry *lock_list; 1831 struct lock_instance *instance; 1832 struct lock_class *class; 1833 1834 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1835 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1836 return; 1837 class = LOCK_CLASS(lock); 1838 if (class->lc_flags & LC_SLEEPLOCK) 1839 lock_list = curthread->td_sleeplocks; 1840 else { 1841 if (witness_skipspin) 1842 return; 1843 lock_list = PCPU_GET(spinlocks); 1844 } 1845 instance = find_instance(lock_list, lock); 1846 if (instance == NULL) 1847 panic("%s: lock (%s) %s not locked", __func__, 1848 class->lc_name, lock->lo_name); 1849 lock->lo_witness->w_file = file; 1850 lock->lo_witness->w_line = line; 1851 instance->li_file = file; 1852 instance->li_line = line; 1853} 1854 1855void 1856witness_assert(struct lock_object *lock, int flags, const char *file, int line) 
1857{ 1858#ifdef INVARIANT_SUPPORT 1859 struct lock_instance *instance; 1860 struct lock_class *class; 1861 1862 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL) 1863 return; 1864 class = LOCK_CLASS(lock); 1865 if ((class->lc_flags & LC_SLEEPLOCK) != 0) 1866 instance = find_instance(curthread->td_sleeplocks, lock); 1867 else if ((class->lc_flags & LC_SPINLOCK) != 0) 1868 instance = find_instance(PCPU_GET(spinlocks), lock); 1869 else { 1870 panic("Lock (%s) %s is not sleep or spin!", 1871 class->lc_name, lock->lo_name); 1872 } 1873 file = fixup_filename(file); 1874 switch (flags) { 1875 case LA_UNLOCKED: 1876 if (instance != NULL) 1877 panic("Lock (%s) %s locked @ %s:%d.", 1878 class->lc_name, lock->lo_name, file, line); 1879 break; 1880 case LA_LOCKED: 1881 case LA_LOCKED | LA_RECURSED: 1882 case LA_LOCKED | LA_NOTRECURSED: 1883 case LA_SLOCKED: 1884 case LA_SLOCKED | LA_RECURSED: 1885 case LA_SLOCKED | LA_NOTRECURSED: 1886 case LA_XLOCKED: 1887 case LA_XLOCKED | LA_RECURSED: 1888 case LA_XLOCKED | LA_NOTRECURSED: 1889 if (instance == NULL) { 1890 panic("Lock (%s) %s not locked @ %s:%d.", 1891 class->lc_name, lock->lo_name, file, line); 1892 break; 1893 } 1894 if ((flags & LA_XLOCKED) != 0 && 1895 (instance->li_flags & LI_EXCLUSIVE) == 0) 1896 panic("Lock (%s) %s not exclusively locked @ %s:%d.", 1897 class->lc_name, lock->lo_name, file, line); 1898 if ((flags & LA_SLOCKED) != 0 && 1899 (instance->li_flags & LI_EXCLUSIVE) != 0) 1900 panic("Lock (%s) %s exclusively locked @ %s:%d.", 1901 class->lc_name, lock->lo_name, file, line); 1902 if ((flags & LA_RECURSED) != 0 && 1903 (instance->li_flags & LI_RECURSEMASK) == 0) 1904 panic("Lock (%s) %s not recursed @ %s:%d.", 1905 class->lc_name, lock->lo_name, file, line); 1906 if ((flags & LA_NOTRECURSED) != 0 && 1907 (instance->li_flags & LI_RECURSEMASK) != 0) 1908 panic("Lock (%s) %s recursed @ %s:%d.", 1909 class->lc_name, lock->lo_name, file, line); 1910 break; 1911 default: 1912 panic("Invalid lock assertion at %s:%d.", file, line); 1913 1914 } 1915#endif /* INVARIANT_SUPPORT */ 1916} 1917 1918#ifdef DDB 1919static void 1920witness_list(struct thread *td) 1921{ 1922 1923 KASSERT(!witness_cold, ("%s: witness_cold", __func__)); 1924 KASSERT(kdb_active, ("%s: not in the debugger", __func__)); 1925 1926 if (witness_watch == 0) 1927 return; 1928 1929 witness_list_locks(&td->td_sleeplocks); 1930 1931 /* 1932 * We only handle spinlocks if td == curthread. This is somewhat broken 1933 * if td is currently executing on some other CPU and holds spin locks 1934 * as we won't display those locks. If we had a MI way of getting 1935 * the per-cpu data for a given cpu then we could use 1936 * td->td_oncpu to get the list of spinlocks for this thread 1937 * and "fix" this. 1938 * 1939 * That still wouldn't really fix this unless we locked sched_lock 1940 * or stopped the other CPU to make sure it wasn't changing the list 1941 * out from under us. It is probably best to just not try to handle 1942 * threads on other CPU's for now. 
1943 */ 1944 if (td == curthread && PCPU_GET(spinlocks) != NULL) 1945 witness_list_locks(PCPU_PTR(spinlocks)); 1946} 1947 1948DB_SHOW_COMMAND(locks, db_witness_list) 1949{ 1950 struct thread *td; 1951 pid_t pid; 1952 struct proc *p; 1953 1954 if (have_addr) { 1955 pid = (addr % 16) + ((addr >> 4) % 16) * 10 + 1956 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 + 1957 ((addr >> 16) % 16) * 10000; 1958 /* sx_slock(&allproc_lock); */ 1959 FOREACH_PROC_IN_SYSTEM(p) { 1960 if (p->p_pid == pid) 1961 break; 1962 } 1963 /* sx_sunlock(&allproc_lock); */ 1964 if (p == NULL) { 1965 db_printf("pid %d not found\n", pid); 1966 return; 1967 } 1968 FOREACH_THREAD_IN_PROC(p, td) { 1969 witness_list(td); 1970 } 1971 } else { 1972 td = curthread; 1973 witness_list(td); 1974 } 1975} 1976 1977DB_SHOW_COMMAND(alllocks, db_witness_list_all) 1978{ 1979 struct thread *td; 1980 struct proc *p; 1981 1982 /* 1983 * It would be nice to list only threads and processes that actually 1984 * held sleep locks, but that information is currently not exported 1985 * by WITNESS. 1986 */ 1987 FOREACH_PROC_IN_SYSTEM(p) { 1988 if (!witness_proc_has_locks(p)) 1989 continue; 1990 FOREACH_THREAD_IN_PROC(p, td) { 1991 if (!witness_thread_has_locks(td)) 1992 continue; 1993 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid, 1994 p->p_comm, td, td->td_tid); 1995 witness_list(td); 1996 } 1997 } 1998} 1999 2000DB_SHOW_COMMAND(witness, db_witness_display) 2001{ 2002 2003 witness_display(db_printf); 2004} 2005#endif 2006
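
/*
 * A minimal usage sketch, not compiled into the kernel: the "foo"/"bar"
 * mutexes and the example_* functions below are hypothetical and exist only
 * to illustrate how the checks above behave for a consumer of mtx(9).  Two
 * MTX_DEF mutexes acquired in opposite orders on two code paths cause
 * witness_checkorder() to report a lock order reversal on the second path.
 */
#if 0
static struct mtx foo_mtx, bar_mtx;

static void
example_init(void)
{

	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
	mtx_init(&bar_mtx, "bar", NULL, MTX_DEF);
}

static void
example_path_a(void)
{

	/* Establishes the order "foo" before "bar" via LOP_NEWORDER. */
	mtx_lock(&foo_mtx);
	mtx_lock(&bar_mtx);
	mtx_unlock(&bar_mtx);
	mtx_unlock(&foo_mtx);
}

static void
example_path_b(void)
{

	/*
	 * Acquires the same locks in the opposite order.  With "bar" held,
	 * acquiring "foo" makes isitmydescendant(foo, bar) true, so
	 * witness_checkorder() prints "lock order reversal" here.
	 */
	mtx_lock(&bar_mtx);
	mtx_lock(&foo_mtx);
	mtx_unlock(&foo_mtx);
	mtx_unlock(&bar_mtx);
}
#endif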