/* subr_witness.c revision 111028 */
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_witness.c 111028 2003-02-17 05:14:26Z jeff $
 */

/*
 * Implementation of the `witness' lock verifier. Originally implemented for
 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 * testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 * a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 * testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 * b : public affirmation by word or example of usually
 * religious faith or conviction <the heroic witness to divine
 * life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

/* Define this to check for blessed mutexes */
#undef BLESSING

/* Size of the static pool of witness structures (see w_data below). */
#define WITNESS_COUNT 200
/* Size of the static pool of child-list entries (see w_childdata below). */
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks. We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT (MAXCPU + 1024) * 2

/* Children per witness_child_list_entry chunk. */
#define WITNESS_NCHILDREN 6

struct witness_child_list_entry;

/*
 * One witness per distinct lock type; records the lock's position in the
 * lock-order hierarchy and bookkeeping for "already complained" state.
 */
struct witness {
	const char *w_name;
	struct lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct witness_child_list_entry *w_children;	/* Great evilness... */
	const char *w_file;	/* File of last acquisition, or "order list". */
	int w_line;		/* Line of last acquisition. */
	u_int w_level;		/* Depth in the hierarchy (witness_levelall). */
	u_int w_refcount;	/* Number of live locks using this witness. */
	u_char w_Giant_squawked:1;	/* Already warned about Giant order. */
	u_char w_other_squawked:1;	/* Already warned about other order. */
	u_char w_same_squawked:1;	/* Already warned about duplicates. */
};

/*
 * Chunked singly-linked list holding the children (locks that may be
 * acquired after) of a witness.
 */
struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;
	struct witness *wcl_children[WITNESS_NCHILDREN];
	u_int wcl_count;	/* Number of slots in use in wcl_children. */
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
/* A pair of lock names excused from lock-order complaints. */
struct witness_blessed {
	const char *b_lock1;
	const char *b_lock2;
};
#endif

/* One entry of the static lock-order tables below (NULL name ends a list). */
struct witness_order_list_entry {
	const char *w_name;
	struct lock_class *w_class;
};

static struct witness *enroll(const char *description,
    struct lock_class *lock_class);
static int itismychild(struct witness *parent, struct witness *child);
static void removechild(struct witness *parent, struct witness *child);
static int isitmychild(struct witness *parent, struct witness *child);
static int isitmydescendant(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int blessed(struct witness *, struct witness *);
#endif
static void witness_displaydescendants(void(*)(const char *fmt, ...),
    struct witness *);
static void witness_leveldescendents(struct witness *parent, int level);
static void witness_levelall(void);
static struct witness *witness_get(void);
static void witness_free(struct witness *m);
static struct witness_child_list_entry *witness_child_get(void);
static void witness_child_free(struct witness_child_list_entry *wcl);
static struct lock_list_entry *witness_lock_list_get(void);
static void witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_instance *find_instance(struct lock_list_entry *lock_list,
    struct lock_object *lock);
#if defined(DDB)
static void witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list);
static void witness_display(void(*)(const char *fmt, ...));
#endif
MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");

/* Master enable for witness checking; settable only as a boot tunable. */
static int witness_watch = 1;
TUNABLE_INT("debug.witness_watch", &witness_watch);
SYSCTL_INT(_debug, OID_AUTO, witness_watch, CTLFLAG_RD, &witness_watch, 0, "");

#ifdef DDB
/*
 * When DDB is enabled and witness_ddb is set to 1, it will cause the system to
 * drop into kdebug() when:
 * - a lock hierarchy violation occurs
 * - locks are held when going to sleep.
 */
#ifdef WITNESS_DDB
int witness_ddb = 1;
#else
int witness_ddb = 0;
#endif
TUNABLE_INT("debug.witness_ddb", &witness_ddb);
SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, "");

/*
 * When DDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace:
 * - a lock hierarchy violation occurs
 * - locks are held when going to sleep.
 */
int witness_trace = 1;
TUNABLE_INT("debug.witness_trace", &witness_trace);
SYSCTL_INT(_debug, OID_AUTO, witness_trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* DDB */

#ifdef WITNESS_SKIPSPIN
int witness_skipspin = 1;
#else
int witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness_skipspin", &witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
    "");

/* Protects the witness hierarchy and the free lists below. */
static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;
static int witness_dead;	/* fatal error, probably no memory */

/* Static pools; handed to the free lists by witness_initialize(). */
static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

/*
 * Static lock-order lists: each NULL-terminated run establishes an order
 * among the named locks (sleep locks first, then spin locks).
 */
static struct witness_order_list_entry order_lists[] = {
	{ "Giant", &lock_class_mtx_sleep },
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#ifdef __i386__
	{ "com", &lock_class_mtx_spin },
#endif
#endif
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "sabtty", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "ithread table lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) && defined(APIC_IO)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of all locks in the system.
 */
TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);

/* Statically-initialized mutex protecting all_locks and the lock counters. */
static struct mtx all_mtx = {
	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
	  "All locks list",		/* mtx_object.lo_name */
	  "All locks list",		/* mtx_object.lo_type */
	  LO_INITIALIZED,		/* mtx_object.lo_flags */
	  { NULL, NULL },		/* mtx_object.lo_list */
	  NULL },			/* mtx_object.lo_witness */
	MTX_UNOWNED, 0,			/* mtx_lock, mtx_recurse */
	TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
	{ NULL, NULL }			/* mtx_contested */
};

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * Global variables for book keeping.
 */
static int lock_cur_cnt;
static int lock_max_cnt;

/*
 * The WITNESS-enabled diagnostic code.
 */

/*
 * One-time setup: seed the free pools from the static arrays, enroll the
 * static order lists to establish the initial hierarchy, then enroll every
 * lock that was initialized before this SYSINIT ran.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	/* Donate the static arrays to the free pools. */
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		/* Each subsequent entry becomes a child of its predecessor. */
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

/*
 * Register a new lock object: sanity-check its flags against its class,
 * put it on all_locks, and enroll it with witness when appropriate.
 */
void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	class = lock->lo_class;
	if (lock->lo_flags & LO_INITIALIZED)
		panic("%s: lock (%s) %s is already initialized", __func__,
		    class->lc_name, lock->lo_name);
	/* A lock may only request capabilities its class supports. */
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	mtx_lock(&all_mtx);
	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
	lock->lo_flags |= LO_INITIALIZED;
	lock_cur_cnt++;
	if (lock_cur_cnt > lock_max_cnt)
		lock_max_cnt = lock_cur_cnt;
	mtx_unlock(&all_mtx);
	if (!witness_cold && !witness_dead && panicstr == NULL &&
	    (lock->lo_flags & LO_WITNESS) != 0)
		lock->lo_witness = enroll(lock->lo_type, class);
	else
		lock->lo_witness = NULL;
}

/*
 * Tear down a lock object: drop its witness reference and remove it from
 * all_locks.
 */
void
witness_destroy(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    lock->lo_class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_INITIALIZED) == 0)
		panic("%s: lock (%s) %s is not initialized", __func__,
		    lock->lo_class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	w = lock->lo_witness;
	if (w != NULL) {
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;
		mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#if defined(DDB)
/*
 * Print, via prnt, the hierarchy rooted at each acquired lock on the given
 * type list that has no ancestor on that list.
 */
static void
witness_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w, *w1;
	int found;

	STAILQ_FOREACH(w, list, w_typelist) {
		/* w_file == NULL means the lock was never acquired. */
		if (w->w_file == NULL)
			continue;
		found = 0;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w)) {
				found++;
				break;
			}
		}
		if (found)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w);
	}
}

/*
 * DDB entry point: dump the full lock hierarchy (sleep locks, spin locks,
 * then never-acquired locks).
 */
static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif

/*
 * Record an acquisition of `lock' at file:line and check it against the
 * current thread's held locks for recursion, duplicate-type, and lock-order
 * violations.  On a new valid ordering, add the edge to the hierarchy.
 */
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;
#ifdef DDB
	int go_into_ddb = 0;
#endif /* DDB */

	if (witness_cold || witness_dead || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		lock_list = &td->td_sleeplocks;
	} else
		lock_list = PCPU_PTR(spinlocks);

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		goto out;

	/*
	 * Is this the first lock acquired?  If so, then no order checking
	 * is needed.
	 */
	if (*lock_list == NULL)
		goto out;

	/*
	 * Check to see if we are recursing on a lock we already own.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		/* Shared acquire of a lock held exclusively is fatal. */
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		/* ...and so is the reverse. */
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		/* Bump the recursion count held in the low flag bits. */
		lock1->li_flags++;
		if ((lock->lo_flags & LO_RECURSABLE) == 0) {
			printf(
			"recursed on non-recursive lock (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("first acquired @ %s:%d\n", lock1->li_file,
			    lock1->li_line);
			panic("recurse");
		}
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    lock1->li_flags & LI_RECURSEMASK);
		lock1->li_file = file;
		lock1->li_line = line;
		return;
	}

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
			goto out;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef DDB
		go_into_ddb = 1;
#endif /* DDB */
		goto out;
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we have a known higher number just say ok
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	/* Acquiring a known descendant of the last lock is in order. */
	if (isitmydescendant(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	/* Walk every held lock looking for an order violation. */
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and we slept with this
			 * lock, then skip it.
			 */
			if ((lock1->li_flags & LI_SLEPT) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable and isn't Giant, we want to treat
			 * it as a lock order violation to enforce a general
			 * lock order of sleepable locks before non-sleepable
			 * locks.  Thus, we only bother checking the lock
			 * order hierarchy if we pass the initial test.
			 */
			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock1->li_lock != &Giant.mtx_object)) &&
			    !isitmydescendant(w, w1))
				continue;
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			if (blessed(w, w1))
				goto out;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					goto out;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					goto out;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				i--;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				}
			} while (i >= 0);
			if (i < 0) {
				/* No earlier instance: two-line report. */
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				/* Earlier instance found: three-line report. */
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef DDB
			go_into_ddb = 1;
#endif /* DDB */
			goto out;
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * Don't build a new relationship if we are locking Giant just
	 * after waking up and the previous lock in the list was acquired
	 * prior to blocking.
	 */
	if (lock == &Giant.mtx_object && (lock1->li_flags & LI_SLEPT) != 0)
		mtx_unlock_spin(&w_mtx);
	else {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		/* itismychild() returning nonzero means w_mtx was released. */
		if (!itismychild(lock1->li_lock->lo_witness, w))
			mtx_unlock_spin(&w_mtx);
	}

out:
#ifdef DDB
	if (go_into_ddb) {
		if (witness_trace)
			backtrace();
		if (witness_ddb)
			Debugger(__func__);
	}
#endif /* DDB */
	w->w_file = file;
	w->w_line = line;

	/* Record this acquisition, growing the per-thread list if full. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	lock1 = &lle->ll_children[lle->ll_count++];
	lock1->li_lock = lock;
	lock1->li_line = line;
	lock1->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		lock1->li_flags = LI_EXCLUSIVE;
	else
		lock1->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

/*
 * Validate and record a shared-to-exclusive upgrade of a held sleep lock.
 */
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	class = lock->lo_class;
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

/*
 * Validate and record an exclusive-to-shared downgrade of a held sleep lock.
 */
void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	class = lock->lo_class;
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

/*
 * Record a release of `lock': unrecurse if recursed, otherwise remove the
 * instance from the holder's lock list, checking share/exclusive mismatches.
 */
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_dead || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock) {
				/* Release mode must match acquisition mode. */
				if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
				    (flags & LOP_EXCLUSIVE) == 0) {
					printf(
					"shared unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while exclusively locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("excl->ushare");
				}
				if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
				    (flags & LOP_EXCLUSIVE) != 0) {
					printf(
					"exclusive unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while share locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("share->uexcl");
				}
				/* If we are recursed, unrecurse. */
				if ((instance->li_flags & LI_RECURSEMASK) > 0) {
					CTR4(KTR_WITNESS,
				    "%s: pid %d unrecursed on %s r=%d", __func__,
					    td->td_proc->p_pid,
					    instance->li_lock->lo_name,
					    instance->li_flags);
					instance->li_flags--;
					return;
				}
				/*
				 * Compact the list with interrupts disabled
				 * so an interrupt can't see a torn list.
				 */
				s = intr_disable();
				CTR4(KTR_WITNESS,
				    "%s: pid %d removed %s from lle[%d]", __func__,
				    td->td_proc->p_pid,
				    instance->li_lock->lo_name,
				    (*lock_list)->ll_count - 1);
				for (j = i; j < (*lock_list)->ll_count - 1; j++)
					(*lock_list)->ll_children[j] =
					    (*lock_list)->ll_children[j + 1];
				(*lock_list)->ll_count--;
				intr_restore(s);
				if ((*lock_list)->ll_count == 0) {
					lle = *lock_list;
					*lock_list = lle->ll_next;
					CTR3(KTR_WITNESS,
					    "%s: pid %d removed lle %p", __func__,
					    td->td_proc->p_pid, lle);
					witness_lock_list_free(lle);
				}
				return;
			}
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
}

/*
 * Warn if any held locks are not sleepable.  Note that Giant and the lock
 * passed in are both special cases since they are both released during the
 * sleep process and aren't actually held while the thread is asleep.
 * Returns the number of offending locks found.
 */
int
witness_sleep(int check_only, struct lock_object *lock, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	int i, n;

	if (witness_cold || witness_dead || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	lock_list = &td->td_sleeplocks;
again:
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock ||
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/* Sleepable locks are allowed; just mark them. */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) {
				if (check_only == 0) {
					CTR3(KTR_WITNESS,
				    "pid %d: sleeping with lock (%s) %s held",
					    td->td_proc->p_pid,
					    lock1->li_lock->lo_class->lc_name,
					    lock1->li_lock->lo_name);
					lock1->li_flags |= LI_SLEPT;
				}
				continue;
			}
			n++;
			printf("%s:%d: %s with \"%s\" locked from %s:%d\n",
			    file, line, check_only ? "could sleep" : "sleeping",
			    lock1->li_lock->lo_name, lock1->li_file,
			    lock1->li_line);
		}
	if (lock_list == &td->td_sleeplocks && PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spinlock preemption is
		 * already blocked.
		 */
		lock_list = PCPU_PTR(spinlocks);
		goto again;
	}
#ifdef DDB
	if (witness_ddb && n)
		Debugger(__func__);
#endif /* DDB */
	return (n);
}

/*
 * Return the file of the last recorded acquisition of `lock' ("?" when
 * witness is unavailable for it).
 */
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_dead || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

/*
 * Return the line of the last recorded acquisition of `lock' (0 when
 * witness is unavailable for it).
 */
int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_dead || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

/*
 * Find or create the witness for a lock type.  Matching is by name pointer
 * or, for live witnesses, by name string; the lock class must agree with
 * any earlier enrollment.  Returns NULL when witness is off or out of
 * resources.
 */
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (!witness_watch || witness_dead || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	/*
	 * This isn't quite right, as witness_cold is still 0 while we
	 * enroll all the locks initialized before witness_initialize().
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
		mtx_unlock_spin(&w_mtx);
		panic("spin lock %s not in order list", description);
	}
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK)
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
	else if (lock_class->lc_flags & LC_SLEEPLOCK)
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
	else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
	return (w);
}

/*
 * Add `child' as a direct child of `parent' in the hierarchy, then prune
 * redundant direct-child links and recompute levels.  Returns nonzero on
 * resource exhaustion (caller keeps ownership of w_mtx in that case).
 */
static int
itismychild(struct witness *parent, struct witness *child)
{
	/* Guards against re-entering the pruning pass from itself. */
	static int recursed;
	struct witness_child_list_entry **wcl;
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	/*
	 * Insert "child" after "parent"
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (1);
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	/*
	 * Now prune whole tree.  We look for cases where a lock is now
	 * both a descendant and a direct child of a given lock.  In that
	 * case, we want to remove the direct child link from the tree.
	 */
	if (recursed)
		return (0);
	recursed = 1;
	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	STAILQ_FOREACH(child, list, w_typelist) {
		STAILQ_FOREACH(parent, list, w_typelist) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			itismychild(parent, child);
		}
	}
	recursed = 0;
	witness_levelall();
	return (0);
}

/*
 * Remove the direct-child link from `parent' to `child', if present,
 * compacting the chunk and freeing it when it empties.
 */
static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	/* Replace the removed slot with the chunk's last entry. */
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	witness_child_free(wcl1);
}

/*
 * Return nonzero iff `child' is a direct child of `parent'.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

/*
 * Return nonzero iff `child' is a (possibly indirect) descendant of
 * `parent', by recursive depth-first search.
 */
static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
j++; 1149 } 1150 return (0); 1151} 1152 1153static void 1154witness_levelall (void) 1155{ 1156 struct witness_list *list; 1157 struct witness *w, *w1; 1158 1159 /* 1160 * First clear all levels. 1161 */ 1162 STAILQ_FOREACH(w, &w_all, w_list) { 1163 w->w_level = 0; 1164 } 1165 1166 /* 1167 * Look for locks with no parent and level all their descendants. 1168 */ 1169 STAILQ_FOREACH(w, &w_all, w_list) { 1170 /* 1171 * This is just an optimization, technically we could get 1172 * away just walking the all list each time. 1173 */ 1174 if (w->w_class->lc_flags & LC_SLEEPLOCK) 1175 list = &w_sleep; 1176 else 1177 list = &w_spin; 1178 STAILQ_FOREACH(w1, list, w_typelist) { 1179 if (isitmychild(w1, w)) 1180 goto skip; 1181 } 1182 witness_leveldescendents(w, 0); 1183 skip: 1184 ; /* silence GCC 3.x */ 1185 } 1186} 1187 1188static void 1189witness_leveldescendents(struct witness *parent, int level) 1190{ 1191 struct witness_child_list_entry *wcl; 1192 int i; 1193 1194 if (parent->w_level < level) 1195 parent->w_level = level; 1196 level++; 1197 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1198 for (i = 0; i < wcl->wcl_count; i++) 1199 witness_leveldescendents(wcl->wcl_children[i], level); 1200} 1201 1202static void 1203witness_displaydescendants(void(*prnt)(const char *fmt, ...), 1204 struct witness *parent) 1205{ 1206 struct witness_child_list_entry *wcl; 1207 int i, level; 1208 1209 level = parent->w_level; 1210 prnt("%-2d", level); 1211 for (i = 0; i < level; i++) 1212 prnt(" "); 1213 if (parent->w_refcount > 0) { 1214 prnt("%s", parent->w_name); 1215 if (parent->w_file != NULL) 1216 prnt(" -- last acquired @ %s:%d\n", parent->w_file, 1217 parent->w_line); 1218 } else 1219 prnt("(dead)\n"); 1220 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) 1221 for (i = 0; i < wcl->wcl_count; i++) 1222 witness_displaydescendants(prnt, 1223 wcl->wcl_children[i]); 1224} 1225 1226#ifdef BLESSING 1227static int 1228blessed(struct witness *w1, struct 
witness *w2) 1229{ 1230 int i; 1231 struct witness_blessed *b; 1232 1233 for (i = 0; i < blessed_count; i++) { 1234 b = &blessed_list[i]; 1235 if (strcmp(w1->w_name, b->b_lock1) == 0) { 1236 if (strcmp(w2->w_name, b->b_lock2) == 0) 1237 return (1); 1238 continue; 1239 } 1240 if (strcmp(w1->w_name, b->b_lock2) == 0) 1241 if (strcmp(w2->w_name, b->b_lock1) == 0) 1242 return (1); 1243 } 1244 return (0); 1245} 1246#endif 1247 1248static struct witness * 1249witness_get(void) 1250{ 1251 struct witness *w; 1252 1253 if (witness_dead) { 1254 mtx_unlock_spin(&w_mtx); 1255 return (NULL); 1256 } 1257 if (STAILQ_EMPTY(&w_free)) { 1258 witness_dead = 1; 1259 mtx_unlock_spin(&w_mtx); 1260 printf("%s: witness exhausted\n", __func__); 1261 return (NULL); 1262 } 1263 w = STAILQ_FIRST(&w_free); 1264 STAILQ_REMOVE_HEAD(&w_free, w_list); 1265 bzero(w, sizeof(*w)); 1266 return (w); 1267} 1268 1269static void 1270witness_free(struct witness *w) 1271{ 1272 1273 STAILQ_INSERT_HEAD(&w_free, w, w_list); 1274} 1275 1276static struct witness_child_list_entry * 1277witness_child_get(void) 1278{ 1279 struct witness_child_list_entry *wcl; 1280 1281 if (witness_dead) { 1282 mtx_unlock_spin(&w_mtx); 1283 return (NULL); 1284 } 1285 wcl = w_child_free; 1286 if (wcl == NULL) { 1287 witness_dead = 1; 1288 mtx_unlock_spin(&w_mtx); 1289 printf("%s: witness exhausted\n", __func__); 1290 return (NULL); 1291 } 1292 w_child_free = wcl->wcl_next; 1293 bzero(wcl, sizeof(*wcl)); 1294 return (wcl); 1295} 1296 1297static void 1298witness_child_free(struct witness_child_list_entry *wcl) 1299{ 1300 1301 wcl->wcl_next = w_child_free; 1302 w_child_free = wcl; 1303} 1304 1305static struct lock_list_entry * 1306witness_lock_list_get(void) 1307{ 1308 struct lock_list_entry *lle; 1309 1310 if (witness_dead) 1311 return (NULL); 1312 mtx_lock_spin(&w_mtx); 1313 lle = w_lock_list_free; 1314 if (lle == NULL) { 1315 witness_dead = 1; 1316 mtx_unlock_spin(&w_mtx); 1317 printf("%s: witness exhausted\n", __func__); 1318 return 
(NULL); 1319 } 1320 w_lock_list_free = lle->ll_next; 1321 mtx_unlock_spin(&w_mtx); 1322 bzero(lle, sizeof(*lle)); 1323 return (lle); 1324} 1325 1326static void 1327witness_lock_list_free(struct lock_list_entry *lle) 1328{ 1329 1330 mtx_lock_spin(&w_mtx); 1331 lle->ll_next = w_lock_list_free; 1332 w_lock_list_free = lle; 1333 mtx_unlock_spin(&w_mtx); 1334} 1335 1336static struct lock_instance * 1337find_instance(struct lock_list_entry *lock_list, struct lock_object *lock) 1338{ 1339 struct lock_list_entry *lle; 1340 struct lock_instance *instance; 1341 int i; 1342 1343 for (lle = lock_list; lle != NULL; lle = lle->ll_next) 1344 for (i = lle->ll_count - 1; i >= 0; i--) { 1345 instance = &lle->ll_children[i]; 1346 if (instance->li_lock == lock) 1347 return (instance); 1348 } 1349 return (NULL); 1350} 1351 1352int 1353witness_list_locks(struct lock_list_entry **lock_list) 1354{ 1355 struct lock_list_entry *lle; 1356 struct lock_instance *instance; 1357 struct lock_object *lock; 1358 int i, nheld; 1359 1360 nheld = 0; 1361 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 1362 for (i = lle->ll_count - 1; i >= 0; i--) { 1363 instance = &lle->ll_children[i]; 1364 lock = instance->li_lock; 1365 printf("%s %s %s", 1366 (instance->li_flags & LI_EXCLUSIVE) != 0 ? 1367 "exclusive" : "shared", 1368 lock->lo_class->lc_name, lock->lo_name); 1369 if (lock->lo_type != lock->lo_name) 1370 printf(" (%s)", lock->lo_type); 1371 printf(" r = %d (%p) locked @ %s:%d\n", 1372 instance->li_flags & LI_RECURSEMASK, lock, 1373 instance->li_file, instance->li_line); 1374 nheld++; 1375 } 1376 return (nheld); 1377} 1378 1379/* 1380 * Calling this on td != curthread is bad unless we are in ddb. 
 */
/*
 * List all locks held by td and return the count.  Outside of ddb this
 * may only be called on curthread (KASSERT enforced).
 */
int
witness_list(struct thread *td)
{
	int nheld;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
#ifdef DDB
	KASSERT(td == curthread || db_active,
	    ("%s: td != curthread and we aren't in the debugger", __func__));
	if (!db_active && witness_dead)
		return (0);
#else
	KASSERT(td == curthread, ("%s: p != curthread", __func__));
	if (witness_dead)
		return (0);
#endif
	nheld = witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPU's for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		nheld += witness_list_locks(PCPU_PTR(spinlocks));

	return (nheld);
}

/*
 * Save the file/line recorded in a held sleep lock's instance into
 * *filep/*linep, for later witness_restore().  Panics if the lock is
 * not a sleep lock or is not held by curthread.
 */
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

/*
 * Restore previously saved file/line into both the lock's witness and
 * its held instance.  Same preconditions and panics as witness_save().
 */
void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_instance *instance;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("%s: lock (%s) %s is not a sleep lock", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    lock->lo_class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

/*
 * Assert a lock's state (LA_* flags: locked/unlocked, shared/exclusive,
 * recursed or not) for curthread and panic with file:line on mismatch.
 * Compiled to a no-op without INVARIANT_SUPPORT.
 */
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;

	if (lock->lo_witness == NULL || witness_dead || panicstr != NULL)
		return;
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    lock->lo_class->lc_name, lock->lo_name);
		return;
	}
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    lock->lo_class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);

	}
#endif /* INVARIANT_SUPPORT */
}

#ifdef DDB

/*
 * ddb "show locks [addr]" command.  With an address argument, the
 * hex digits ddb parsed are reinterpreted as a decimal pid (each
 * nibble scaled by a power of ten) and that process's threads are
 * listed; otherwise curthread's locks are listed.
 */
DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;
	pid_t pid;
	struct proc *p;

	if (have_addr) {
		/* ddb read the argument as hex; recover the decimal pid. */
		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
		    ((addr >> 16) % 16) * 10000;
		/* sx_slock(&allproc_lock); */
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_pid == pid)
				break;
		}
		/* sx_sunlock(&allproc_lock); */
		if (p == NULL) {
			db_printf("pid %d not found\n", pid);
			return;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			witness_list(td);
		}
	} else {
		td = curthread;
		witness_list(td);
	}
}

/*
 * ddb "show witness" command: dump the lock-order tree via db_printf.
 */
DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif