subr_witness.c revision 183955
/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
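
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * "foo" is a hypothetical sx lock): both interleavings below are legal
 * under rules 2) and 3), because a thread that blocks on a sleepable
 * lock drops Giant first, so no thread ever sleeps while holding Giant.
 */
#if 0
	struct sx foo;

	sx_xlock(&foo);			/* sleepable lock first */
	mtx_lock(&Giant);		/* Giant after a sleepable lock: OK */
	mtx_unlock(&Giant);
	sx_xunlock(&foo);

	mtx_lock(&Giant);		/* Giant first is also OK ... */
	sx_xlock(&foo);			/* ... it is dropped if we sleep here */
	sx_xunlock(&foo);
	mtx_unlock(&Giant);
#endif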

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 183955 2008-10-16 12:42:56Z attilio $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	512

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	CYCLEGRAPH_SBUF_SIZE	8192
#define	FULLGRAPH_SBUF_SIZE	32768

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
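
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the descendant-side bits are the ancestor-side bits shifted left by two,
 * so WITNESS_ATOD() and WITNESS_DTOA() translate one direction of the
 * relationship matrix into the other:
 */
#if 0
	MPASS(WITNESS_ATOD(WITNESS_PARENT | WITNESS_ANCESTOR) ==
	    (WITNESS_CHILD | WITNESS_DESCENDANT));
	MPASS(WITNESS_DTOA(WITNESS_CHILD | WITNESS_DESCENDANT) ==
	    (WITNESS_PARENT | WITNESS_ANCESTOR));
#endif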

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread
 * list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
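
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * use_instance() is a hypothetical consumer): walking the held-lock list
 * from most- to least-recently acquired therefore reads each entry from
 * ll_count - 1 down to 0, the same shape used by find_instance() later
 * in this file.
 */
#if 0
	struct lock_list_entry *lle;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			use_instance(&lle->ll_children[i]);
#endif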

/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	int			w_displayed:1;
	int			w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table.  Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table.  Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(void(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
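
/*
 * Summary added for clarity (not part of the original file), restating the
 * comment above:
 *	witness_watch == -1	witness permanently disabled
 *	witness_watch ==  0	bookkeeping only, order checking off
 *	witness_watch >=  1	full lock order checking
 * For example, "sysctl debug.witness.watch=0" turns checking off at
 * runtime and "sysctl debug.witness.watch=1" turns it back on, but a
 * transition to -1 is one-way.
 */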

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char *w_notrunning = "Witness not running\n";
static const char *w_stillcold = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "time lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};
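
/*
 * Note added for clarity (not part of the original file): each
 * NULL-terminated group above defines one ordering chain.  In the first
 * group, for instance, witness records "proctree" before "allproc"
 * before "allprison", so acquiring "allproc" while holding "allprison"
 * would be reported as a lock order reversal.
 */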

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
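
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * the lock names are hypothetical): with BLESSING defined, an entry such
 * as
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "first lock", "second lock" },
 *	};
 *
 * makes blessed() return 1 for that pair in either order, suppressing
 * reversal reports between the two.
 */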

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code does
 * assume that the early boot is single-threaded at least until after this
 * routine is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	MALLOC(w_data, struct witness *,
	    sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness with index 0 is left unused, to aid debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);
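
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * "m" is a hypothetical mutex): locks initialized before SI_SUB_WITNESS
 * are parked on pending_locks[] and enrolled by witness_initialize()
 * above; a lock created any later is enrolled directly from
 * witness_init() below.
 */
#if 0
	struct mtx m;

	mtx_init(&m, "example mutex", NULL, MTX_DEF); /* reaches witness_init() */
#endif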

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}

static void
witness_ddb_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
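
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * "a" and "b" are hypothetical witnessed mutexes): a subsystem can seed
 * an explicit order without ever holding both locks at once:
 */
#if 0
	struct mtx a, b;
	int error;

	error = witness_defineorder(&a.lock_object, &b.lock_object);
	if (error == EDOOFUS)
		printf("the reverse order is already on record\n");
#endif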

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Avoid problems with thread
		 * migration pinning the thread while checking if
		 * spinlocks are held.  If at least one spinlock is held
		 * the thread is in a safe path and it is allowed to
		 * unpin it.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    plock->li_file, plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    lock2->li_file, lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}
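
	/*
	 * Note added for clarity (not part of the original file): the
	 * recursion depth lives in the low 16 bits of li_flags
	 * (LI_RECURSEMASK), so the li_flags++ above bumps the depth while
	 * leaving LI_EXCLUSIVE and the other high bits intact.
	 */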

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}
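
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * "rw" is a hypothetical rwlock): the upgrade/downgrade hooks above only
 * flip LI_EXCLUSIVE on the existing lock instance, e.g.:
 */
#if 0
	struct rwlock rw;

	rw_rlock(&rw);			/* shared: LI_EXCLUSIVE clear */
	if (rw_try_upgrade(&rw))	/* witness_upgrade(): sets LI_EXCLUSIVE */
		rw_downgrade(&rw);	/* witness_downgrade(): clears it */
	rw_runlock(&rw);
#endif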

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch, registered locks can
	 * linger in the td_sleeplocks queue.  We have to make sure those
	 * queues get flushed, so search for any such leftover locks and
	 * remove them instead of panicking.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * To reduce contention on w_mtx, we always want to keep a head
	 * object in the lists so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.  To keep
	 * the code simple, an emptied head object also means that no
	 * further objects remain in the list, so list ownership has to be
	 * handed over to another object if the current head needs to be
	 * freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i]);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}

	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all the checks of spinlock ownership have passed, the thread
	 * is on a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL) {

		/* Empty list? */
		if (lock_list->ll_count == 0) {
			sched_unpin();
			return (n);
		}
		sched_unpin();

		/*
		 * We should only hold one spinlock at this point and, since
		 * the flags cannot match for this lock's class, check
		 * whether the first spinlock is the one curthread should
		 * hold.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock1->li_lock == lock)
			return (n);

		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(&lock_list);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}
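
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a typical caller asserts on a sleep path that nothing unexpected is
 * held, exempting Giant and sleepable locks:
 */
#if 0
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "sleeping with locks held");
#endif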

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * Set file to NULL as it may point into a loadable module.
	 */
	w->w_file = NULL;
	w->w_line = 0;
	witness_increment_graph_generation();
}


static void
adopt(struct witness *parent, struct witness *child)
{
	int pi, ci, i, j;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	/* If the relationship is already known, there's no work to be done. */
	if (isitmychild(parent, child))
		return;

	/* When the structure of the graph changes, bump up the generation. */
	witness_increment_graph_generation();

	/*
	 * The hard part ... create the direct relationship, then propagate all
	 * indirect relationships.
	 */
	pi = parent->w_index;
	ci = child->w_index;
	WITNESS_INDEX_ASSERT(pi);
	WITNESS_INDEX_ASSERT(ci);
	MPASS(pi != ci);
	w_rmatrix[pi][ci] |= WITNESS_PARENT;
	w_rmatrix[ci][pi] |= WITNESS_CHILD;

	/*
	 * If parent was not already an ancestor of child,
	 * then we increment the descendant and ancestor counters.
	 */
	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
		parent->w_num_descendants++;
		child->w_num_ancestors++;
	}

	/*
	 * Find each ancestor of 'pi'.  Note that 'pi' itself is counted as
	 * an ancestor of 'pi' during this loop.
	 */
	for (i = 1; i <= w_max_used_index; i++) {
		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
		    (i != pi))
			continue;

		/* Find each descendant of 'i' and mark it as a descendant. */
		for (j = 1; j <= w_max_used_index; j++) {

			/*
			 * Skip children that are already marked as
			 * descendants of 'i'.
			 */
			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
				continue;

			/*
			 * We are only interested in descendants of 'ci'. Note
			 * that 'ci' itself is counted as a descendant of 'ci'.
			 */
			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
			    (j != ci))
				continue;
			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
			w_data[i].w_num_descendants++;
			w_data[j].w_num_ancestors++;

			/*
			 * Make sure we aren't marking a node as both an
			 * ancestor and descendant.  We should have caught
			 * this as a lock order reversal earlier.
			 */
			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    i, j, w_rmatrix[i][j]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    j, i, w_rmatrix[j][i]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
		}
	}
}

static void
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (!witness_lock_type_equal(parent, child)) {
		if (witness_cold == 0)
			mtx_unlock_spin(&w_mtx);
		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
		    "the same lock type", __func__, parent->w_name,
		    parent->w_class->lc_name, child->w_name,
		    child->w_class->lc_name);
	}
	adopt(parent, child);
}
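
/*
 * Illustrative sketch (added for clarity, not part of the original file;
 * wa, wb and wc are hypothetical witnesses): after itismychild(wa, wb)
 * and itismychild(wb, wc), the closure computed by adopt() also records
 * the indirect pair, so:
 */
#if 0
	MPASS(isitmychild(wa, wb));		/* direct edge */
	MPASS(!isitmychild(wa, wc));		/* no direct edge ... */
	MPASS(isitmydescendant(wa, wc));	/* ... but an indirect one */
#endif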

/*
 * Generic code for the isitmy*() functions.  The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/* The flags on one better be the inverse of the flags on the other */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = -1;
	}
	return (r1 & rmask);
}

/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif
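
/*
 * Hypothetical example: with BLESSING defined, an entry in blessed_list
 * suppresses reversal reports for a known-benign pair of lock names.
 * The names below are made up.
 */
#if 0
static struct witness_blessed blessed_list[] = {
	{ "lockA", "lockB" },
};
static int blessed_count = sizeof(blessed_list) / sizeof(blessed_list[0]);
#endif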

static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == -1) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index + 1 &&
	    index < WITNESS_COUNT);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		printf(" (%s)", lock->lo_witness->w_name);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}
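
/*
 * witness_list_lock() emits one line per held lock instance; an
 * illustrative (made-up) example of its output:
 *
 *	exclusive sleep mutex process lock (process lock) r = 0
 *	    (0xc45a7c00) locked @ /usr/src/sys/kern/kern_fork.c:278
 */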

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	if (td->td_sleeplocks == NULL)
		return (0);
	return (td->td_sleeplocks->ll_count != 0);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}
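
/*
 * Minimal usage sketch for witness_save()/witness_restore(): callers such
 * as the sleep primitives use the WITNESS_SAVE()/WITNESS_RESTORE() macros
 * built on these functions to preserve a lock's file/line bookkeeping
 * across a point where the lock is dropped and reacquired.  'm' is a
 * hypothetical mutex.
 */
#if 0
	const char *file;
	int line;

	witness_save(&m.lock_object, &file, &line);
	mtx_unlock(&m);
	/* ... */
	mtx_lock(&m);
	witness_restore(&m.lock_object, file, line);
#endif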

void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}
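
/*
 * Usage sketch: witness_assert() is normally reached from a lock class's
 * assertion path (e.g. rw_assert()/sx_assert() defer their shared-state
 * checks here when WITNESS is compiled in), but it can also be called
 * directly with the LA_* flags handled above.  'sc_mtx' is a
 * hypothetical mutex.
 */
#if 0
	witness_assert(&sc_mtx.lock_object, LA_XLOCKED, __FILE__, __LINE__);
#endif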

#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us.  It is probably best to just not try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * hold sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif
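
/*
 * From the ddb(4) prompt, the commands defined above are invoked as, for
 * example (the thread address is made up):
 *
 *	show locks 0xc6a80300
 *	show all locks		(alias: "show alllocks")
 *	show witness
 */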

static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	struct sbuf *sb;
	u_int w_rmatrix1, w_rmatrix2;
	int error, generation, i, j;

	tmp_data1 = NULL;
	tmp_data2 = NULL;
	tmp_w1 = NULL;
	tmp_w2 = NULL;
	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	for (i = 1; i <= w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			req->oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j <= w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;

			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				req->oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;
			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			sbuf_printf(sb,
		"\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
#if 0
			sbuf_printf(sb,
			    "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
#endif
			if (data1) {
				sbuf_printf(sb,
		"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
		"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		req->oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}
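
/*
 * The handler above is read from userland with sysctl(8); assuming the
 * usual registration of this handler as debug.witness.badstacks:
 *
 *	sysctl debug.witness.badstacks
 *
 * Note the lockless read pattern: w_generation is sampled under w_mtx,
 * witness and stack data are copied out in chunks with the lock dropped,
 * and the whole report restarts from scratch whenever the generation
 * count shows the graph changed underneath us.
 */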

static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
	 */
	if (sbuf_overflowed(sb)) {
		sbuf_delete(sb);
		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
		    __func__);
	}

	/*
	 * Close the sbuf and return to userland.
	 */
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value > 1 || value < -1 ||
	    (witness_watch == -1 && value != witness_watch))
		return (EINVAL);
	witness_watch = value;
	return (0);
}

static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}

/*
 * A simple hash function.  Takes a key pointer and a key size.  If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes.  Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}
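
/*
 * Worked example of the hash above, for the string key "ab" (size == 0):
 *
 *	hash = 5381
 *	hash = 5381 * 33 + 'a' (97) = 177670
 *	hash = 177670 * 33 + 'b' (98) = 5863208
 *
 * Callers reduce the result modulo their table size.
 */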

/*
 * Initializes the two witness hash tables.  Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}

static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			goto out;
		w = w->w_hash_next;
	}

out:
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}

static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		goto out;

	hash = witness_hash_djb2((const char *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}

out:
	return (data);
}
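
/*
 * Note that the lock-order key is directional: (parent, child) and
 * (child, parent) hash to distinct entries, which is how the badstacks
 * sysctl above can report a separate "first seen" stack for each side of
 * a reversal.
 */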

/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent.  This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}

static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const char *)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}

/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}

#ifdef KDB
static void
_witness_debugger(int cond, const char *msg)
{

	if (witness_trace && cond)
		kdb_backtrace();
	if (witness_kdb && cond)
		kdb_enter(KDB_WHY_WITNESS, msg);
}
#endif
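
/*
 * The witness_trace and witness_kdb variables checked above correspond to
 * the debug.witness.trace and debug.witness.kdb sysctls (also settable as
 * loader tunables), so a detected problem can be made to print a stack
 * trace, drop into the debugger, or both.
 */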