subr_witness.c revision 181695
/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	   testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	   a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	   testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	     religious faith or conviction <the heroic witness to divine
 *	     life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
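/*
 * Illustrative sketch of rule 3) above (not compiled; "example_sx" is a
 * hypothetical sleepable sx(9) lock).  Both orderings are legal:
 */
#if 0
	/* Giant after a sleepable lock: */
	sx_xlock(&example_sx);
	mtx_lock(&Giant);
	mtx_unlock(&Giant);
	sx_xunlock(&example_sx);

	/*
	 * Giant before a sleepable lock: if sx_xlock() has to sleep, the
	 * thread releases Giant for the duration of the sleep (rule 2),
	 * so no deadlock against the ordering above is possible.
	 */
	mtx_lock(&Giant);
	sx_xlock(&example_sx);
	sx_xunlock(&example_sx);
	mtx_unlock(&Giant);
#endif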
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 181695 2008-08-13 18:24:22Z attilio $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	512

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	CYCLEGRAPH_SBUF_SIZE	8192
#define	FULLGRAPH_SBUF_SIZE	32768

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
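/*
 * Example of the conversion (illustrative): the ancestor-side flags occupy
 * the low two bits and the descendant-side flags the next two bits, so
 * shifting by two converts between the two views of the same edge:
 *
 *	WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD
 *	WITNESS_DTOA(WITNESS_DESCENDANT) == WITNESS_ANCESTOR
 *
 * _isitmyx() uses these to verify that w_rmatrix[i][j] and w_rmatrix[j][i]
 * remain mirror images of each other.
 */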
#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
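/*
 * Illustrative sketch (not a function in this file): walking every lock
 * instance in such a list from most recently acquired to least recently
 * acquired, per the comment above:
 */
#if 0
	struct lock_list_entry *lle;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			witness_list_lock(&lle->ll_children[i]);
#endif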
/*
 * The main witness structure.  One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	int			w_displayed:1;
	int			w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table.  Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table.  Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}
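/*
 * Sketch of how the two helpers above combine with witness_hash_djb2() for
 * a chained-hash lookup (illustrative only; the real lookup lives in
 * witness_lock_order_get(), defined later in this file):
 */
#if 0
	struct witness_lock_order_data *data;
	struct witness_lock_order_key key;

	key.from = parent->w_index;
	key.to = child->w_index;
	data = w_lohash.wloh_array[witness_hash_djb2((const uint8_t *)&key,
	    sizeof(key)) % w_lohash.wloh_size];
	while (data != NULL &&
	    !witness_lock_order_key_equal(&data->wlod_key, &key))
		data = data->wlod_next;
#endif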
static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(void(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  Otherwise witness performs full lock
 * order checking for all locks.  At runtime, witness may be turned off;
 * however, it cannot be turned back on once it has been turned off.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
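/*
 * Example usage of the knobs above (illustrative): witness can be disabled
 * at runtime with "sysctl debug.witness.watch=0", or at boot by setting
 * debug.witness.watch="0" in loader.conf(5).  debug.witness.kdb,
 * debug.witness.trace and debug.witness.skipspin are plain TUNABLE_INT/
 * SYSCTL entries and can be set the same way; skipspin is CTLFLAG_RDTUN
 * and so can only be set at boot.
 */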
static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char *w_notrunning = "Witness not running, witness_watch == 0\n";
static const char *w_stillcold = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
mount mtx", &lock_class_mtx_sleep }, 593 { "vnode interlock", &lock_class_mtx_sleep }, 594 { NULL, NULL }, 595 /* 596 * spin locks 597 */ 598#ifdef SMP 599 { "ap boot", &lock_class_mtx_spin }, 600#endif 601 { "rm.mutex_mtx", &lock_class_mtx_spin }, 602 { "sio", &lock_class_mtx_spin }, 603 { "scrlock", &lock_class_mtx_spin }, 604#ifdef __i386__ 605 { "cy", &lock_class_mtx_spin }, 606#endif 607#ifdef __sparc64__ 608 { "pcib_mtx", &lock_class_mtx_spin }, 609 { "rtc_mtx", &lock_class_mtx_spin }, 610#endif 611 { "scc_hwmtx", &lock_class_mtx_spin }, 612 { "uart_hwmtx", &lock_class_mtx_spin }, 613 { "fast_taskqueue", &lock_class_mtx_spin }, 614 { "intr table", &lock_class_mtx_spin }, 615#ifdef HWPMC_HOOKS 616 { "pmc-per-proc", &lock_class_mtx_spin }, 617#endif 618 { "process slock", &lock_class_mtx_spin }, 619 { "sleepq chain", &lock_class_mtx_spin }, 620 { "umtx lock", &lock_class_mtx_spin }, 621 { "rm_spinlock", &lock_class_mtx_spin }, 622 { "turnstile chain", &lock_class_mtx_spin }, 623 { "turnstile lock", &lock_class_mtx_spin }, 624 { "sched lock", &lock_class_mtx_spin }, 625 { "td_contested", &lock_class_mtx_spin }, 626 { "callout", &lock_class_mtx_spin }, 627 { "entropy harvest mutex", &lock_class_mtx_spin }, 628 { "syscons video lock", &lock_class_mtx_spin }, 629 { "time lock", &lock_class_mtx_spin }, 630#ifdef SMP 631 { "smp rendezvous", &lock_class_mtx_spin }, 632#endif 633#ifdef __powerpc__ 634 { "tlb0", &lock_class_mtx_spin }, 635#endif 636 /* 637 * leaf locks 638 */ 639 { "intrcnt", &lock_class_mtx_spin }, 640 { "icu", &lock_class_mtx_spin }, 641#if defined(SMP) && defined(__sparc64__) 642 { "ipi", &lock_class_mtx_spin }, 643#endif 644#ifdef __i386__ 645 { "allpmaps", &lock_class_mtx_spin }, 646 { "descriptor tables", &lock_class_mtx_spin }, 647#endif 648 { "clk", &lock_class_mtx_spin }, 649 { "cpuset", &lock_class_mtx_spin }, 650 { "mprof lock", &lock_class_mtx_spin }, 651 { "zombie lock", &lock_class_mtx_spin }, 652 { "ALD Queue", &lock_class_mtx_spin }, 653#ifdef __ia64__ 654 { "MCA spin lock", &lock_class_mtx_spin }, 655#endif 656#if defined(__i386__) || defined(__amd64__) 657 { "pcicfg", &lock_class_mtx_spin }, 658 { "NDIS thread lock", &lock_class_mtx_spin }, 659#endif 660 { "tw_osl_io_lock", &lock_class_mtx_spin }, 661 { "tw_osl_q_lock", &lock_class_mtx_spin }, 662 { "tw_cl_io_lock", &lock_class_mtx_spin }, 663 { "tw_cl_intr_lock", &lock_class_mtx_spin }, 664 { "tw_cl_gen_lock", &lock_class_mtx_spin }, 665#ifdef HWPMC_HOOKS 666 { "pmc-leaf", &lock_class_mtx_spin }, 667#endif 668 { "blocked lock", &lock_class_mtx_spin }, 669 { NULL, NULL }, 670 { NULL, NULL } 671}; 672 673#ifdef BLESSING 674/* 675 * Pairs of locks which have been blessed 676 * Don't complain about order problems with blessed locks 677 */ 678static struct witness_blessed blessed_list[] = { 679}; 680static int blessed_count = 681 sizeof(blessed_list) / sizeof(struct witness_blessed); 682#endif 683 684/* 685 * This global is set to 0 once it becomes safe to use the witness code. 686 */ 687static int witness_cold = 1; 688 689/* 690 * This global is set to 1 once the static lock orders have been enrolled 691 * so that a warning can be issued for any spin locks enrolled later. 692 */ 693static int witness_spin_warn = 0; 694 695/* 696 * The WITNESS-enabled diagnostic code. Note that the witness code does 697 * assume that the early boot is single-threaded at least until after this 698 * routine is completed. 
/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine has
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	MALLOC(w_data, struct witness *,
	    sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness with index 0 is left unused, to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);
void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch == 0 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}
static void
witness_ddb_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
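/*
 * For example, a file name of "../../kern/kern_mutex.c" is reported as
 * "kern/kern_mutex.c".
 */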
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/* Empty list? */
	if ((*lock_list)->ll_count == 0)
		return;

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf("acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
			    lock1->li_file, lock1->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    lock2->li_file, lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			}
			witness_debugger(1);
			return;
		}
	}
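	/*
	 * Illustrative example of the report printed above, with made-up
	 * lock names, addresses and locations (the 1st/2nd lines follow
	 * the printf formats used here):
	 *
	 *	lock order reversal:
	 *	 1st 0xc41b4a40 bar lock (bar lock) @ kern/foo.c:123
	 *	 2nd 0xc41c0b00 foo lock (foo lock) @ kern/foo.c:456
	 */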
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, lock1->li_lock->lo_witness->w_name);
		itismychild(lock1->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update the per-witness file and line of the last acquisition. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * If this lock list entry is not the first and is now empty, free it.
	 */
	if (*lock_list != lle && (*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i]);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		lock_list = PCPU_PTR(spinlocks);

		/* Empty list? */
		if ((*lock_list)->ll_count == 0)
			return (n);

		/*
		 * Since we already hold a spinlock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}
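/*
 * Example call (illustrative; the message string is hypothetical): a caller
 * that is about to sleep might check that only Giant and sleepable locks
 * are held, and panic otherwise:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
 *	    "sleeping");
 */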
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * Set file to NULL as it may point into a loadable module.
	 */
	w->w_file = NULL;
	w->w_line = 0;
	witness_increment_graph_generation();
}


static void
adopt(struct witness *parent, struct witness *child)
{
	int pi, ci, i, j;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	/* If the relationship is already known, there's no work to be done. */
	if (isitmychild(parent, child))
		return;

	/* When the structure of the graph changes, bump up the generation. */
	witness_increment_graph_generation();

	/*
	 * The hard part ... create the direct relationship, then propagate all
	 * indirect relationships.
	 */
	pi = parent->w_index;
	ci = child->w_index;
	WITNESS_INDEX_ASSERT(pi);
	WITNESS_INDEX_ASSERT(ci);
	MPASS(pi != ci);
	w_rmatrix[pi][ci] |= WITNESS_PARENT;
	w_rmatrix[ci][pi] |= WITNESS_CHILD;

	/*
	 * If parent was not already an ancestor of child,
	 * then we increment the descendant and ancestor counters.
	 */
	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
		parent->w_num_descendants++;
		child->w_num_ancestors++;
	}

	/*
	 * Find each ancestor of 'pi'.  Note that 'pi' itself is counted as
	 * an ancestor of 'pi' during this loop.
	 */
	for (i = 1; i <= w_max_used_index; i++) {
		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
		    (i != pi))
			continue;

		/* Find each descendant of 'i' and mark it as a descendant. */
		for (j = 1; j <= w_max_used_index; j++) {

			/*
			 * Skip children that are already marked as
			 * descendants of 'i'.
			 */
			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
				continue;

			/*
			 * We are only interested in descendants of 'ci'.  Note
			 * that 'ci' itself is counted as a descendant of 'ci'.
			 */
			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
			    (j != ci))
				continue;
			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
			w_data[i].w_num_descendants++;
			w_data[j].w_num_ancestors++;

			/*
			 * Make sure we aren't marking a node as both an
			 * ancestor and descendant.  We should have caught
			 * this as a lock order reversal earlier.
			 */
			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    i, j, w_rmatrix[i][j]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = 0;
			}
			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    j, i, w_rmatrix[j][i]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = 0;
			}
		}
	}
}
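/*
 * Worked example of the propagation above (illustrative): suppose A is
 * already an ancestor of B and adopt(B, C) is called.  The direct edge
 * sets w_rmatrix[B][C] |= WITNESS_PARENT and w_rmatrix[C][B] |=
 * WITNESS_CHILD; the nested loops then also mark A as an ancestor of C
 * (w_rmatrix[A][C] |= WITNESS_ANCESTOR, w_rmatrix[C][A] |=
 * WITNESS_DESCENDANT) and bump the ancestor/descendant counters, so a
 * later C-before-A acquisition is caught as a reversal.
 */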
/*
 * Generic code for the isitmy*() functions. The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/* The flags on one better be the inverse of the flags on the other */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = 0;
	}
	return (r1 & rmask);
}

/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index + 1 &&
	    index < WITNESS_COUNT);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}
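
/*
 * The shape of the data find_instance() walks, for reference: held locks
 * live in a chain of fixed-size chunks, with the newest chunk at the head
 * of the chain and the newest instance in the highest used slot of its
 * chunk.  Both loops above therefore run backwards from ll_count - 1, so
 * the most recently acquired instance of a lock is found first.
 *
 *	list -> [ ll_count = 3 ] -> [ ll_count = LOCK_NCHILDREN ] -> NULL
 *	          ll_children[2]	<- most recent acquisition
 *	          ll_children[1]
 *	          ll_children[0]
 */
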
static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		printf(" (%s)", lock->lo_witness->w_name);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best. We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held. So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}
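
/*
 * Illustrative pairing of witness_save() and witness_restore() (not
 * compiled; the surrounding code and 'm' are hypothetical).  A caller
 * that must drop and re-acquire a lock can preserve the original
 * file/line bookkeeping, so later diagnostics point at the real
 * acquisition site instead of the re-acquisition below:
 */
#if 0
	const char *file;
	int line;

	witness_save(&m->lock_object, &file, &line);
	mtx_unlock(m);
	/* ... work that must not hold the lock ... */
	mtx_lock(m);
	witness_restore(&m->lock_object, file, line);
#endif
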
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}
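
/*
 * Illustrative calls showing the witness_assert() flag combinations
 * accepted above (not compiled; 'm' is a hypothetical mutex).  In
 * practice these checks are typically reached through higher-level
 * assertion macros rather than invoked directly:
 */
#if 0
	/* Must be held, shared or exclusive. */
	witness_assert(&m->lock_object, LA_LOCKED, __FILE__, __LINE__);

	/* Must be held exclusively and must not be recursed. */
	witness_assert(&m->lock_object, LA_XLOCKED | LA_NOTRECURSED,
	    __FILE__, __LINE__);

	/* Must not be held at all. */
	witness_assert(&m->lock_object, LA_UNLOCKED, __FILE__, __LINE__);
#endif
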
#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread. This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks,
	 * as we won't display those locks. If we had an MI way of getting
	 * the per-cpu data for a given CPU then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us. It is probably best to just not try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    td->td_name, td, td->td_tid);
			witness_ddb_list(td);
		}
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif

static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	struct sbuf *sb;
	u_int w_rmatrix1, w_rmatrix2;
	int error, generation, i, j;

	tmp_data1 = NULL;
	tmp_data2 = NULL;
	tmp_w1 = NULL;
	tmp_w2 = NULL;
	if (witness_watch == 0) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	for (i = 1; i < w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			req->oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j < w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;

			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				req->oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;
			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			sbuf_printf(sb,
			    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
#if 0
			sbuf_printf(sb,
			    "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
#endif
			if (data1) {
				sbuf_printf(sb,
				    "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
				    "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		req->oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}
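
/*
 * Sketch of the optimistic snapshot-and-retry scheme used by the sysctl
 * handler above (not compiled; every name below is a hypothetical
 * stand-in).  The graph cannot stay locked across the slow formatting
 * work, so the handler snapshots w_generation, copies what it needs
 * under the spin lock in small pieces, and restarts from scratch,
 * discarding partial output, whenever the generation has moved:
 */
#if 0
	int gen, i;

restart:
	lock();
	gen = generation;			/* snapshot */
	unlock();
	for (i = 1; i <= max_index; i++) {
		lock();
		if (gen != generation) {	/* graph changed under us */
			unlock();
			discard_partial_output();
			goto restart;
		}
		copy_entry_locally(i);
		unlock();
		format_copied_entry(i);		/* slow work, lock dropped */
	}
#endif
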
static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (witness_watch == 0) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
	 */
	if (sbuf_overflowed(sb)) {
		sbuf_delete(sb);
		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
		    __func__);
	}

	/*
	 * Close the sbuf and return to userland.
	 */
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}

/*
 * A simple hash function. Takes a key pointer and a key size. If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes. Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}
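
/*
 * Stand-alone user-space rendering of the string case of the hash above,
 * runnable outside the kernel (the modulus 251 mirrors WITNESS_HASH_SIZE):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t
djb2(const char *s)
{
	uint32_t h = 5381;

	while (*s != '\0')
		h = h * 33 + (unsigned char)*s++;	/* (h << 5) + h + c */
	return (h);
}

int
main(void)
{
	/* Equal strings always land in the same bucket. */
	printf("%u\n", (unsigned int)(djb2("Giant") % 251));
	return (0);
}
#endif
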
/*
 * Initializes the two witness hash tables. Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}

static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			goto out;
		w = w->w_hash_next;
	}

out:
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}

static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		goto out;

	hash = witness_hash_djb2((const char *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}

out:
	return (data);
}
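
/*
 * Note that the lock order table is keyed on the ordered pair (from, to)
 * of witness indices: "A then B" and "B then A" are distinct entries,
 * each remembering the stack of the first acquisition seen in that
 * order.  This is what lets the badstacks sysctl above print a
 * "first seen at" stack for each direction of a reversal.
 */
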
/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent. This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}

static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const char *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}

/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}

#ifdef KDB
static void
_witness_debugger(int cond, const char *msg)
{

	if (witness_trace && cond)
		kdb_backtrace();
	if (witness_kdb && cond)
		kdb_enter(KDB_WHY_WITNESS, msg);
}
#endif