/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
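/*
 * A minimal sketch of the rules above (illustrative, not part of the
 * original comment), using a hypothetical sleepable lock "xlock":
 *
 *	mtx_lock(&Giant);	rule 1: no other mutex held yet
 *	sx_xlock(&xlock);	rule 3: Giant before a sleepable lock is fine
 *
 * If the thread must sleep while waiting for "xlock", Giant is released
 * for the duration of the sleep, which is exactly rule 2; that is why the
 * T1/T2 interleaving described above cannot deadlock.
 */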
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 183054 2008-09-15 22:45:14Z sam $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	512

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	CYCLEGRAPH_SBUF_SIZE	8192
#define	FULLGRAPH_SBUF_SIZE	32768

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
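/*
 * Illustrative only (not from the original source): because the parent
 * and child bits sit two positions apart, the conversion macros above
 * reduce to simple shifts, e.g.
 *
 *	WITNESS_ATOD(WITNESS_PARENT)   == WITNESS_CHILD
 *	WITNESS_ATOD(WITNESS_ANCESTOR) == WITNESS_DESCENDANT
 *	WITNESS_DTOA(WITNESS_CHILD)    == WITNESS_PARENT
 *
 * so w_rmatrix[a][d] and w_rmatrix[d][a] can be kept as mirror images of
 * each other (see _isitmyx() below, which verifies exactly this).
 */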
MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
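/*
 * A minimal sketch (illustrative, not from the original source) of the
 * traversal convention described above, walking held locks from most to
 * least recently acquired:
 *
 *	struct lock_list_entry *lle;
 *	int i;
 *
 *	for (lle = list; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			examine(&lle->ll_children[i]);	(hypothetical helper)
 *
 * This is the same idiom used by find_instance() and witness_warn() below.
 */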
/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	int			w_displayed:1;
	int			w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(void(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(void(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  Otherwise witness performs full lock
 * order checking for all locks.  At runtime, witness is allowed to be turned
 * off; however, it is not allowed to be turned back on once it has been
 * turned off.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */
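/*
 * Illustrative only: the knobs above are reachable from userland, e.g.
 * "sysctl debug.witness.watch=0" disables checking at runtime and
 * "sysctl debug.witness.fullgraph" dumps the lock relation graph.
 * Setting debug.witness.watch back to 1 after it has been turned off is
 * rejected, per the comment above witness_watch.
 */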
/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char *w_notrunning = "Witness not running\n";
static const char *w_stillcold = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	{ "time lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.  Witness does not complain about
 * order problems with blessed lock pairs.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
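/*
 * Illustrative only: if BLESSING were defined, an entry such as
 *
 *	{ "someLockA", "someLockB" },		(hypothetical lock names)
 *
 * in blessed_list above would make blessed() suppress lock order
 * complaints between those two witnesses, in either acquisition order.
 */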
/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code does
 * assume that the early boot is single-threaded at least until after this
 * routine is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	MALLOC(w_data, struct witness *,
	    sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* Witness with index 0 is not used to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);
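/*
 * Illustrative only: consumers do not call witness_init() directly.  A
 * lock initialization such as
 *
 *	mtx_init(&sc_mtx, "foo softc", NULL, MTX_DEF);	(hypothetical lock)
 *
 * reaches witness_init() through the generic lock initialization path;
 * the lock is then either enrolled immediately or, while witness_cold is
 * still set, deferred to witness_initialize() via the pending_locks[]
 * array.
 */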
void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		/* Check the bound first so that the array cannot overflow. */
		if (pending_cnt >= WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}

static void
witness_ddb_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}
	/* Empty list? */
	if ((*lock_list)->ll_count == 0)
		return;

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if ((*lock_list)->ll_count == 1) {

			/*
			 * The interlock is the only lock we hold, so
			 * nothing to do.
			 */
			return;
		}
		plock = &(*lock_list)->ll_children[(*lock_list)->ll_count - 2];
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf("acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			/*
			 * Report the previously acquired instance (plock);
			 * lock1 is known to be NULL at this point since
			 * find_instance() found no recursion above.
			 */
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    plock->li_file, plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    lock2->li_file, lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, lock1->li_file, lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name, file, line);
			}
			witness_debugger(1);
			return;
		}
	}
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}
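/*
 * Illustrative only: li_flags packs the recursion count into its low 16
 * bits (LI_RECURSEMASK) with LI_EXCLUSIVE above them, so the plain
 * "li_flags++" above and "li_flags--" in witness_unlock() adjust the
 * recursion depth without disturbing the mode bit, e.g.
 *
 *	depth = instance->li_flags & LI_RECURSEMASK;
 *	excl  = (instance->li_flags & LI_EXCLUSIVE) != 0;
 */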
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}
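/*
 * Illustrative only: an upgradable sleep lock such as an sx lock
 * (hypothetical usage) is what exercises the two functions above:
 *
 *	sx_slock(&some_sx);		shared; LI_EXCLUSIVE clear
 *	sx_try_upgrade(&some_sx);	-> witness_upgrade(): set LI_EXCLUSIVE
 *	sx_downgrade(&some_sx);		-> witness_downgrade(): clear it again
 */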
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch we can still end up
	 * with registered locks in the td_sleeplocks queue.  We have to
	 * make sure we flush these queues, so tolerate lookup failures
	 * here and simply return so any such leftover locks get removed.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * In order to reduce contention on w_mtx, we always want to keep a
	 * head object in the list so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.  To keep the
	 * code simple, a completely emptied head object also means that there
	 * are no further objects in the list, so list ownership needs to be
	 * handed over to another object if the current head must be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i]);
			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}
1581{ 1582 struct lock_list_entry **lock_list, *lle; 1583 struct lock_instance *lock1; 1584 struct thread *td; 1585 va_list ap; 1586 int i, n; 1587 1588 if (witness_cold || witness_watch < 1 || panicstr != NULL) 1589 return (0); 1590 n = 0; 1591 td = curthread; 1592 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next) 1593 for (i = lle->ll_count - 1; i >= 0; i--) { 1594 lock1 = &lle->ll_children[i]; 1595 if (lock1->li_lock == lock) 1596 continue; 1597 if (flags & WARN_GIANTOK && 1598 lock1->li_lock == &Giant.lock_object) 1599 continue; 1600 if (flags & WARN_SLEEPOK && 1601 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) 1602 continue; 1603 if (n == 0) { 1604 va_start(ap, fmt); 1605 vprintf(fmt, ap); 1606 va_end(ap); 1607 printf(" with the following"); 1608 if (flags & WARN_SLEEPOK) 1609 printf(" non-sleepable"); 1610 printf(" locks held:\n"); 1611 } 1612 n++; 1613 witness_list_lock(lock1); 1614 } 1615 if (PCPU_GET(spinlocks) != NULL) { 1616 lock_list = PCPU_PTR(spinlocks); 1617 1618 /* Empty list? */ 1619 if ((*lock_list)->ll_count == 0) 1620 return (n); 1621 1622 /* 1623 * Since we already hold a spinlock preemption is 1624 * already blocked. 1625 */ 1626 if (n == 0) { 1627 va_start(ap, fmt); 1628 vprintf(fmt, ap); 1629 va_end(ap); 1630 printf(" with the following"); 1631 if (flags & WARN_SLEEPOK) 1632 printf(" non-sleepable"); 1633 printf(" locks held:\n"); 1634 } 1635 n += witness_list_locks(PCPU_PTR(spinlocks)); 1636 } 1637 if (flags & WARN_PANIC && n) 1638 panic("%s", __func__); 1639 else 1640 witness_debugger(n); 1641 return (n); 1642} 1643 1644const char * 1645witness_file(struct lock_object *lock) 1646{ 1647 struct witness *w; 1648 1649 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL) 1650 return ("?"); 1651 w = lock->lo_witness; 1652 return (w->w_file); 1653} 1654 1655int 1656witness_line(struct lock_object *lock) 1657{ 1658 struct witness *w; 1659 1660 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL) 1661 return (0); 1662 w = lock->lo_witness; 1663 return (w->w_line); 1664} 1665 1666static struct witness * 1667enroll(const char *description, struct lock_class *lock_class) 1668{ 1669 struct witness *w; 1670 struct witness_list *typelist; 1671 1672 MPASS(description != NULL); 1673 1674 if (witness_watch == -1 || panicstr != NULL) 1675 return (NULL); 1676 if ((lock_class->lc_flags & LC_SPINLOCK)) { 1677 if (witness_skipspin) 1678 return (NULL); 1679 else 1680 typelist = &w_spin; 1681 } else if ((lock_class->lc_flags & LC_SLEEPLOCK)) 1682 typelist = &w_sleep; 1683 else 1684 panic("lock class %s is not sleep or spin", 1685 lock_class->lc_name); 1686 1687 mtx_lock_spin(&w_mtx); 1688 w = witness_hash_get(description); 1689 if (w) 1690 goto found; 1691 if ((w = witness_get()) == NULL) 1692 return (NULL); 1693 MPASS(strlen(description) < MAX_W_NAME); 1694 strcpy(w->w_name, description); 1695 w->w_class = lock_class; 1696 w->w_refcount = 1; 1697 STAILQ_INSERT_HEAD(&w_all, w, w_list); 1698 if (lock_class->lc_flags & LC_SPINLOCK) { 1699 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist); 1700 w_spin_cnt++; 1701 } else if (lock_class->lc_flags & LC_SLEEPLOCK) { 1702 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist); 1703 w_sleep_cnt++; 1704 } 1705 1706 /* Insert new witness into the hash */ 1707 witness_hash_put(w); 1708 witness_increment_graph_generation(); 1709 mtx_unlock_spin(&w_mtx); 1710 return (w); 1711found: 1712 w->w_refcount++; 1713 mtx_unlock_spin(&w_mtx); 1714 if (lock_class != w->w_class) 1715 panic( 1716 "lock (%s) %s does not match earlier (%s) 
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * Set file to NULL as it may point into a loadable module.
	 */
	w->w_file = NULL;
	w->w_line = 0;
	witness_increment_graph_generation();
}


static void
adopt(struct witness *parent, struct witness *child)
{
	int pi, ci, i, j;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	/* If the relationship is already known, there's no work to be done. */
	if (isitmychild(parent, child))
		return;

	/* When the structure of the graph changes, bump up the generation. */
	witness_increment_graph_generation();

	/*
	 * The hard part ... create the direct relationship, then propagate all
	 * indirect relationships.
	 */
	pi = parent->w_index;
	ci = child->w_index;
	WITNESS_INDEX_ASSERT(pi);
	WITNESS_INDEX_ASSERT(ci);
	MPASS(pi != ci);
	w_rmatrix[pi][ci] |= WITNESS_PARENT;
	w_rmatrix[ci][pi] |= WITNESS_CHILD;

	/*
	 * If parent was not already an ancestor of child,
	 * then we increment the descendant and ancestor counters.
	 */
	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
		parent->w_num_descendants++;
		child->w_num_ancestors++;
	}

	/*
	 * Find each ancestor of 'pi'.  Note that 'pi' itself is counted as
	 * an ancestor of 'pi' during this loop.
	 */
	for (i = 1; i <= w_max_used_index; i++) {
		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
		    (i != pi))
			continue;

		/* Find each descendant of 'i' and mark it as a descendant. */
		for (j = 1; j <= w_max_used_index; j++) {

			/*
			 * Skip children that are already marked as
			 * descendants of 'i'.
			 */
			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
				continue;

			/*
			 * We are only interested in descendants of 'ci'.  Note
			 * that 'ci' itself is counted as a descendant of 'ci'.
			 */
			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
			    (j != ci))
				continue;
			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
			w_data[i].w_num_descendants++;
			w_data[j].w_num_ancestors++;

			/*
			 * Make sure we aren't marking a node as both an
			 * ancestor and descendant.  We should have caught
			 * this as a lock order reversal earlier.
			 */
			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    i, j, w_rmatrix[i][j]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    j, i, w_rmatrix[j][i]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
		}
	}
}
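/*
 * A worked example (illustrative only): suppose "A" is already known as
 * a parent of "B" and adopt(B, C) is called.  The direct step sets
 *
 *	w_rmatrix[B][C] |= WITNESS_PARENT;
 *	w_rmatrix[C][B] |= WITNESS_CHILD;
 *
 * and the nested loops above then pair A (an ancestor of B) with C (a
 * descendant of C itself), setting
 *
 *	w_rmatrix[A][C] |= WITNESS_ANCESTOR;
 *	w_rmatrix[C][A] |= WITNESS_DESCENDANT;
 *
 * so the A-before-C ordering becomes checkable by a single matrix lookup
 * rather than a graph walk.  (A, B, and C are hypothetical witnesses.)
 */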
[%d][%d]=%d " 1828 "both ancestor and descendant\n", 1829 j, i, w_rmatrix[j][i]); 1830 kdb_backtrace(); 1831 printf("Witness disabled.\n"); 1832 witness_watch = -1; 1833 } 1834 } 1835 } 1836} 1837 1838static void 1839itismychild(struct witness *parent, struct witness *child) 1840{ 1841 1842 MPASS(child != NULL && parent != NULL); 1843 if (witness_cold == 0) 1844 mtx_assert(&w_mtx, MA_OWNED); 1845 1846 if (!witness_lock_type_equal(parent, child)) { 1847 if (witness_cold == 0) 1848 mtx_unlock_spin(&w_mtx); 1849 panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not " 1850 "the same lock type", __func__, parent->w_name, 1851 parent->w_class->lc_name, child->w_name, 1852 child->w_class->lc_name); 1853 } 1854 adopt(parent, child); 1855} 1856 1857/* 1858 * Generic code for the isitmy*() functions. The rmask parameter is the 1859 * expected relationship of w1 to w2. 1860 */ 1861static int 1862_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname) 1863{ 1864 unsigned char r1, r2; 1865 int i1, i2; 1866 1867 i1 = w1->w_index; 1868 i2 = w2->w_index; 1869 WITNESS_INDEX_ASSERT(i1); 1870 WITNESS_INDEX_ASSERT(i2); 1871 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK; 1872 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK; 1873 1874 /* The flags on one better be the inverse of the flags on the other */ 1875 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) || 1876 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) { 1877 printf("%s: rmatrix mismatch between %s (index %d) and %s " 1878 "(index %d): w_rmatrix[%d][%d] == %hhx but " 1879 "w_rmatrix[%d][%d] == %hhx\n", 1880 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1, 1881 i2, i1, r2); 1882 kdb_backtrace(); 1883 printf("Witness disabled.\n"); 1884 witness_watch = -1; 1885 } 1886 return (r1 & rmask); 1887} 1888 1889/* 1890 * Checks if @child is a direct child of @parent. 1891 */ 1892static int 1893isitmychild(struct witness *parent, struct witness *child) 1894{ 1895 1896 return (_isitmyx(parent, child, WITNESS_PARENT, __func__)); 1897} 1898 1899/* 1900 * Checks if @descendant is a direct or inderect descendant of @ancestor. 
#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == -1) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index+1 &&
	    index < WITNESS_COUNT);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

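/*
 * Both allocators above draw from fixed, preallocated pools (w_free and
 * w_lock_list_free) because they may be called with spin locks held,
 * including w_mtx itself, where malloc(9) cannot be used.  When a pool
 * runs dry, witness sets witness_watch to -1 and disables itself instead
 * of failing unpredictably; raising WITNESS_COUNT or LOCK_CHILDCOUNT and
 * rebuilding the kernel is the remedy if that ever triggers.
 */
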
2026 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name); 2027 if (lock->lo_witness->w_name != lock->lo_name) 2028 printf(" (%s)", lock->lo_witness->w_name); 2029 printf(" r = %d (%p) locked @ %s:%d\n", 2030 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file, 2031 instance->li_line); 2032} 2033 2034#ifdef DDB 2035static int 2036witness_thread_has_locks(struct thread *td) 2037{ 2038 2039 if (td->td_sleeplocks == NULL) 2040 return (0); 2041 return (td->td_sleeplocks->ll_count != 0); 2042} 2043 2044static int 2045witness_proc_has_locks(struct proc *p) 2046{ 2047 struct thread *td; 2048 2049 FOREACH_THREAD_IN_PROC(p, td) { 2050 if (witness_thread_has_locks(td)) 2051 return (1); 2052 } 2053 return (0); 2054} 2055#endif 2056 2057int 2058witness_list_locks(struct lock_list_entry **lock_list) 2059{ 2060 struct lock_list_entry *lle; 2061 int i, nheld; 2062 2063 nheld = 0; 2064 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 2065 for (i = lle->ll_count - 1; i >= 0; i--) { 2066 witness_list_lock(&lle->ll_children[i]); 2067 nheld++; 2068 } 2069 return (nheld); 2070} 2071 2072/* 2073 * This is a bit risky at best. We call this function when we have timed 2074 * out acquiring a spin lock, and we assume that the other CPU is stuck 2075 * with this lock held. So, we go groveling around in the other CPU's 2076 * per-cpu data to try to find the lock instance for this spin lock to 2077 * see when it was last acquired. 2078 */ 2079void 2080witness_display_spinlock(struct lock_object *lock, struct thread *owner) 2081{ 2082 struct lock_instance *instance; 2083 struct pcpu *pc; 2084 2085 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 2086 return; 2087 pc = pcpu_find(owner->td_oncpu); 2088 instance = find_instance(pc->pc_spinlocks, lock); 2089 if (instance != NULL) 2090 witness_list_lock(instance); 2091} 2092 2093void 2094witness_save(struct lock_object *lock, const char **filep, int *linep) 2095{ 2096 struct lock_list_entry *lock_list; 2097 struct lock_instance *instance; 2098 struct lock_class *class; 2099 2100 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2101 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2102 return; 2103 class = LOCK_CLASS(lock); 2104 if (class->lc_flags & LC_SLEEPLOCK) 2105 lock_list = curthread->td_sleeplocks; 2106 else { 2107 if (witness_skipspin) 2108 return; 2109 lock_list = PCPU_GET(spinlocks); 2110 } 2111 instance = find_instance(lock_list, lock); 2112 if (instance == NULL) 2113 panic("%s: lock (%s) %s not locked", __func__, 2114 class->lc_name, lock->lo_name); 2115 *filep = instance->li_file; 2116 *linep = instance->li_line; 2117} 2118 2119void 2120witness_restore(struct lock_object *lock, const char *file, int line) 2121{ 2122 struct lock_list_entry *lock_list; 2123 struct lock_instance *instance; 2124 struct lock_class *class; 2125 2126 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2127 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2128 return; 2129 class = LOCK_CLASS(lock); 2130 if (class->lc_flags & LC_SLEEPLOCK) 2131 lock_list = curthread->td_sleeplocks; 2132 else { 2133 if (witness_skipspin) 2134 return; 2135 lock_list = PCPU_GET(spinlocks); 2136 } 2137 instance = find_instance(lock_list, lock); 2138 if (instance == NULL) 2139 panic("%s: lock (%s) %s not locked", __func__, 2140 class->lc_name, lock->lo_name); 2141 lock->lo_witness->w_file = file; 2142 lock->lo_witness->w_line = line; 2143 instance->li_file = file; 2144 instance->li_line = line; 2145} 2146 
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}

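/*
 * An illustrative caller (a sketch, not a call site from this file): a
 * subsystem that requires its sx lock to be held exclusively, and not
 * recursively, before some teardown step could assert that with:
 */
#if 0
	witness_assert(&sx->lock_object, LA_XLOCKED | LA_NOTRECURSED,
	    __FILE__, __LINE__);
#endif
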
#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread. This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks. If we had a MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us. It is probably best to just not try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * hold sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif

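/*
 * From the DDB prompt, the commands defined above would be used along
 * these lines (output shapes are illustrative):
 *
 *	db> show locks		current thread, or "show locks <addr>"
 *	db> show all locks	every thread holding locks (alias: alllocks)
 *	db> show witness	dump the whole lock-order graph
 */
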
static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	struct sbuf *sb;
	u_int w_rmatrix1, w_rmatrix2;
	int error, generation, i, j;

	tmp_data1 = NULL;
	tmp_data2 = NULL;
	tmp_w1 = NULL;
	tmp_w2 = NULL;
	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	for (i = 1; i < w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			req->oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j < w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;

			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				req->oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;
			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			sbuf_printf(sb,
	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
#if 0
			sbuf_printf(sb,
			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
#endif
			if (data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		req->oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}

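/*
 * The sysctl above uses an optimistic-snapshot pattern: w_mtx is a spin
 * lock, so it cannot be held across the sbuf formatting and SYSCTL_OUT
 * work.  Instead the handler samples w_generation, drops the lock, does
 * the slow formatting, and re-checks the generation afterwards.  Any
 * adopt() or depart() in the meantime bumps the generation and forces a
 * restart with a cleared buffer, so the exported report is always
 * derived from one consistent version of the graph.
 */
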
static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
	 */
	if (sbuf_overflowed(sb)) {
		sbuf_delete(sb);
		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
		    __func__);
	}

	/*
	 * Close the sbuf and return to userland.
	 */
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value > 1 || value < -1 ||
	    (witness_watch == -1 && value != witness_watch))
		return (EINVAL);
	witness_watch = value;
	return (0);
}

static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}

/*
 * A simple hash function. Takes a key pointer and a key size. If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes. Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}

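/*
 * A worked example of the djb2 recurrence above: hashing the two-byte
 * string "ab" starts from 5381, then
 *
 *	hash = 5381   * 33 + 'a' (97) = 177670
 *	hash = 177670 * 33 + 'b' (98) = 5863208
 *
 * (hash << 5) + hash is simply a shift-and-add spelling of hash * 33.
 */
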
/*
 * Initializes the two witness hash tables. Called exactly once from
 * witness_initialize().
 */
static void
witness_init_hash_tables(void)
{
	int i;

	MPASS(witness_cold);

	/* Initialize the hash tables. */
	for (i = 0; i < WITNESS_HASH_SIZE; i++)
		w_hash.wh_array[i] = NULL;

	w_hash.wh_size = WITNESS_HASH_SIZE;
	w_hash.wh_count = 0;

	/* Initialize the lock order data hash. */
	w_lofree = NULL;
	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
		w_lodata[i].wlod_next = w_lofree;
		w_lofree = &w_lodata[i];
	}
	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
	w_lohash.wloh_count = 0;
	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
		w_lohash.wloh_array[i] = NULL;
}

static struct witness *
witness_hash_get(const char *key)
{
	struct witness *w;
	uint32_t hash;

	MPASS(key != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
	w = w_hash.wh_array[hash];
	while (w != NULL) {
		if (strcmp(w->w_name, key) == 0)
			goto out;
		w = w->w_hash_next;
	}

out:
	return (w);
}

static void
witness_hash_put(struct witness *w)
{
	uint32_t hash;

	MPASS(w != NULL);
	MPASS(w->w_name != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	KASSERT(witness_hash_get(w->w_name) == NULL,
	    ("%s: trying to add a hash entry that already exists!", __func__));
	KASSERT(w->w_hash_next == NULL,
	    ("%s: w->w_hash_next != NULL", __func__));

	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
	w->w_hash_next = w_hash.wh_array[hash];
	w_hash.wh_array[hash] = w;
	w_hash.wh_count++;
}

static struct witness_lock_order_data *
witness_lock_order_get(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if ((w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
		goto out;

	hash = witness_hash_djb2((const char*)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	data = w_lohash.wloh_array[hash];
	while (data != NULL) {
		if (witness_lock_order_key_equal(&data->wlod_key, &key))
			break;
		data = data->wlod_next;
	}

out:
	return (data);
}

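/*
 * The lock order table is keyed by the (from, to) pair of witness
 * indices, so looking up the stack trace recorded for a hypothetical
 * pair (a, b) amounts to:
 *
 *	key.from = a; key.to = b;
 *	hash = witness_hash_djb2((const char *)&key, sizeof(key))
 *	    % w_lohash.wloh_size;
 *
 * followed by a walk of the wlod_next chain comparing full keys, exactly
 * as witness_lock_order_get() does above.
 */
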
/*
 * Verify that parent and child have a known relationship, are not the same,
 * and child is actually a child of parent. This is done without w_mtx
 * to avoid contention in the common case.
 */
static int
witness_lock_order_check(struct witness *parent, struct witness *child)
{

	if (parent != child &&
	    w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN &&
	    isitmychild(parent, child))
		return (1);

	return (0);
}

static int
witness_lock_order_add(struct witness *parent, struct witness *child)
{
	struct witness_lock_order_data *data = NULL;
	struct witness_lock_order_key key;
	unsigned int hash;

	MPASS(parent != NULL && child != NULL);
	key.from = parent->w_index;
	key.to = child->w_index;
	WITNESS_INDEX_ASSERT(key.from);
	WITNESS_INDEX_ASSERT(key.to);
	if (w_rmatrix[parent->w_index][child->w_index]
	    & WITNESS_LOCK_ORDER_KNOWN)
		return (1);

	hash = witness_hash_djb2((const char*)&key,
	    sizeof(key)) % w_lohash.wloh_size;
	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
	data = w_lofree;
	if (data == NULL)
		return (0);
	w_lofree = data->wlod_next;
	data->wlod_next = w_lohash.wloh_array[hash];
	data->wlod_key = key;
	w_lohash.wloh_array[hash] = data;
	w_lohash.wloh_count++;
	stack_zero(&data->wlod_stack);
	stack_save(&data->wlod_stack);
	return (1);
}

/* Call this whenever the structure of the witness graph changes. */
static void
witness_increment_graph_generation(void)
{

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);
	w_generation++;
}

#ifdef KDB
static void
_witness_debugger(int cond, const char *msg)
{

	if (witness_trace && cond)
		kdb_backtrace();
	if (witness_kdb && cond)
		kdb_enter(KDB_WHY_WITNESS, msg);
}
#endif

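/*
 * _witness_debugger() keys its behavior off two knobs declared earlier in
 * this file: witness_trace selects a console backtrace and witness_kdb
 * drops into the debugger.  A typical debugging setup might enable both
 * from loader.conf (illustrative):
 *
 *	debug.witness.kdb=1
 *	debug.witness.trace=1
 */
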