subr_witness.c revision 239584
/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
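#if 0
/*
 * Illustrative sketch (not compiled): the two orderings permitted by
 * rule 3), using a hypothetical sleepable sx(9) lock "example_sx"
 * (assumes <sys/sx.h>).  Both orders are safe because a thread that
 * blocks on the sx lock while holding Giant drops Giant first, per
 * rule 2).
 */
static struct sx example_sx;

static void
example_giant_orders(void)
{

	sx_slock(&example_sx);		/* sleepable lock first ... */
	mtx_lock(&Giant);		/* ... Giant after: allowed. */
	mtx_unlock(&Giant);
	sx_sunlock(&example_sx);

	mtx_lock(&Giant);		/* Giant first ... */
	sx_slock(&example_sx);		/* ... sleepable after: also allowed,
					   since blocking here releases
					   Giant. */
	sx_sunlock(&example_sx);
	mtx_unlock(&Giant);
}
#endif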
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 239584 2012-08-22 20:00:41Z jhb $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	768

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	 0x00	/* No lock order relation. */
#define	WITNESS_PARENT		 0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	 0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		 0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	 0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	 (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	 (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	 0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	 0x20	/* Unused flag, reserved. */
#define	WITNESS_RESERVED2	 0x40	/* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80	/* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
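/*
 * For example, WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD and
 * WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT: the relationship bits
 * stored at w_rmatrix[a][d] and at w_rmatrix[d][a] are mirror images of
 * one another, which _isitmyx() below verifies.
 */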
#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread
 * list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
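#if 0
/*
 * Illustrative sketch (not compiled): walking a lock list from the
 * logically first (most recently acquired) entry to the logically last,
 * per the comment above: children[ll_count - 1] first, down to
 * children[0], then on to the next entry via ll_next.
 */
static void
example_walk_lock_list(struct lock_list_entry *lle)
{
	int i;

	for (; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--)
			printf("held: %s\n",
			    lle->ll_children[i].li_lock->lo_name);
}
#endif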
/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char			w_name[MAX_W_NAME];
	uint32_t		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness)	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness)	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next;	/* Linked list in hash buckets. */
	const char		*w_file;	/* File where last acquired */
	uint32_t		w_line;		/* Line where last acquired */
	uint32_t		w_refcount;
	uint16_t		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t			w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
	    (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}
static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    const struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
    "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
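/*
 * Example usage: boot with lock order checking disabled by setting the
 * loader tunable debug.witness.watch=0 in loader.conf, or toggle it at
 * runtime with "sysctl debug.witness.watch=0".  Writing -1 disables
 * witness entirely and, as noted above, cannot be undone.
 */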
#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
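/*
 * The relationship matrix above is indexed by w_index on both axes.  It
 * is (WITNESS_COUNT + 1) entries on a side because index 0 is reserved:
 * witness_initialize() removes the index-0 witness from the free list,
 * and WITNESS_INDEX_ASSERT() requires indices to be greater than zero.
 */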
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ "time lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_global_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_rw },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "vm map (system)", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * VM
	 */
	{ "vm map (user)", &lock_class_sx },
	{ "vm object", &lock_class_mtx_sleep },
	{ "vm page", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "pmap pv global", &lock_class_rw },
	{ "pmap", &lock_class_mtx_sleep },
	{ "pmap pv list", &lock_class_rw },
	{ "vm page free queue", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#if defined(SMP) && defined(__sparc64__)
	{ "ipi", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};
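/*
 * Each run of entries above describes one ordered chain: every named lock
 * is enrolled as a parent of the lock that follows it.  A { NULL, NULL }
 * entry terminates a chain, and the second consecutive { NULL, NULL }
 * terminates the whole table; see the enrollment loop in
 * witness_initialize() below.
 */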
#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
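/*
 * For example, fixup_filename("../../../kern/kern_mutex.c") returns
 * "kern/kern_mutex.c": each leading "../" component left over from
 * kernel build paths is stripped before the name is printed.
 */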
/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that the early boot is single-threaded at least until after this routine
 * is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* The witness with index 0 is left unused, as an aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}
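#if 0
/*
 * Illustrative sketch (not compiled): locks normally reach witness_init()
 * indirectly, via their lock class's init routine.  A hypothetical driver
 * lock like the one below is enrolled when mtx_init() runs; the name
 * string is the key that enroll() uses to find or create the matching
 * struct witness, so locks sharing a name share a witness.
 */
static struct mtx example_sc_mtx;

static void
example_attach(void)
{

	mtx_init(&example_sc_mtx, "example driver lock", NULL, MTX_DEF);
}
#endif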
#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (db_pager_quit)
			return;
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
		if (db_pager_quit)
			return;
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);
	if (db_pager_quit)
		return;

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);
	if (db_pager_quit)
		return;

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}
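#if 0
/*
 * Illustrative sketch (not compiled): a subsystem with two hypothetical
 * mutexes can declare an explicit "a before b" ordering up front rather
 * than waiting for witness to infer it from the first nested acquisition.
 * EDOOFUS is returned if the reverse order is already known.
 */
static struct mtx example_a, example_b;

static void
example_define_order(void)
{
	int error;

	error = witness_defineorder(&example_a.lock_object,
	    &example_b.lock_object);
	if (error != 0)
		printf("lock order a->b rejected: %d\n", error);
}
#endif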
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Avoid problems with thread migration
		 * by pinning the thread while checking whether spin locks
		 * are held.  If at least one spin lock is held, the thread
		 * is on a safe path, so it is allowed to unpin itself.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}
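#if 0
/*
 * Illustrative sketch (not compiled): the upgrade/downgrade hooks above
 * are reached through upgradable sleep locks such as sx(9) (assumes
 * <sys/sx.h>).  With a hypothetical lock "example_lock", a successful
 * sx_try_upgrade() calls witness_upgrade(), and a later sx_downgrade()
 * calls witness_downgrade().
 */
static struct sx example_lock;

static void
example_upgrade_path(void)
{

	sx_slock(&example_lock);		/* shared acquire */
	if (sx_try_upgrade(&example_lock))	/* shared -> exclusive */
		sx_downgrade(&example_lock);	/* exclusive -> shared */
	sx_sunlock(&example_lock);		/* shared release */
}
#endif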

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch, locks may still be
	 * registered in the td_sleeplocks queue.  Those queues must be
	 * flushed, so search for any such registered locks and remove
	 * them; only panic about an unlocked lock while witness is still
	 * watching.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * To reduce contention on w_mtx, we always want to keep a head
	 * object in each list so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.  To keep
	 * the code simple, a fully drained head object also means that
	 * there are no further objects in the list, so list ownership
	 * must be handed over to another entry if the current head needs
	 * to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i], printf);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}
If indicated in the 1642 * flags then a failure results in a panic as well. 1643 */ 1644int 1645witness_warn(int flags, struct lock_object *lock, const char *fmt, ...) 1646{ 1647 struct lock_list_entry *lock_list, *lle; 1648 struct lock_instance *lock1; 1649 struct thread *td; 1650 va_list ap; 1651 int i, n; 1652 1653 if (witness_cold || witness_watch < 1 || panicstr != NULL) 1654 return (0); 1655 n = 0; 1656 td = curthread; 1657 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next) 1658 for (i = lle->ll_count - 1; i >= 0; i--) { 1659 lock1 = &lle->ll_children[i]; 1660 if (lock1->li_lock == lock) 1661 continue; 1662 if (flags & WARN_GIANTOK && 1663 lock1->li_lock == &Giant.lock_object) 1664 continue; 1665 if (flags & WARN_SLEEPOK && 1666 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0) 1667 continue; 1668 if (n == 0) { 1669 va_start(ap, fmt); 1670 vprintf(fmt, ap); 1671 va_end(ap); 1672 printf(" with the following"); 1673 if (flags & WARN_SLEEPOK) 1674 printf(" non-sleepable"); 1675 printf(" locks held:\n"); 1676 } 1677 n++; 1678 witness_list_lock(lock1, printf); 1679 } 1680 1681 /* 1682 * Pin the thread in order to avoid problems with thread migration. 1683 * Once that all verifies are passed about spinlocks ownership, 1684 * the thread is in a safe path and it can be unpinned. 1685 */ 1686 sched_pin(); 1687 lock_list = PCPU_GET(spinlocks); 1688 if (lock_list != NULL && lock_list->ll_count != 0) { 1689 sched_unpin(); 1690 1691 /* 1692 * We should only have one spinlock and as long as 1693 * the flags cannot match for this locks class, 1694 * check if the first spinlock is the one curthread 1695 * should hold. 1696 */ 1697 lock1 = &lock_list->ll_children[lock_list->ll_count - 1]; 1698 if (lock_list->ll_count == 1 && lock_list->ll_next == NULL && 1699 lock1->li_lock == lock && n == 0) 1700 return (0); 1701 1702 va_start(ap, fmt); 1703 vprintf(fmt, ap); 1704 va_end(ap); 1705 printf(" with the following"); 1706 if (flags & WARN_SLEEPOK) 1707 printf(" non-sleepable"); 1708 printf(" locks held:\n"); 1709 n += witness_list_locks(&lock_list, printf); 1710 } else 1711 sched_unpin(); 1712 if (flags & WARN_PANIC && n) 1713 panic("%s", __func__); 1714 else 1715 witness_debugger(n); 1716 return (n); 1717} 1718 1719const char * 1720witness_file(struct lock_object *lock) 1721{ 1722 struct witness *w; 1723 1724 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL) 1725 return ("?"); 1726 w = lock->lo_witness; 1727 return (w->w_file); 1728} 1729 1730int 1731witness_line(struct lock_object *lock) 1732{ 1733 struct witness *w; 1734 1735 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL) 1736 return (0); 1737 w = lock->lo_witness; 1738 return (w->w_line); 1739} 1740 1741static struct witness * 1742enroll(const char *description, struct lock_class *lock_class) 1743{ 1744 struct witness *w; 1745 struct witness_list *typelist; 1746 1747 MPASS(description != NULL); 1748 1749 if (witness_watch == -1 || panicstr != NULL) 1750 return (NULL); 1751 if ((lock_class->lc_flags & LC_SPINLOCK)) { 1752 if (witness_skipspin) 1753 return (NULL); 1754 else 1755 typelist = &w_spin; 1756 } else if ((lock_class->lc_flags & LC_SLEEPLOCK)) 1757 typelist = &w_sleep; 1758 else 1759 panic("lock class %s is not sleep or spin", 1760 lock_class->lc_name); 1761 1762 mtx_lock_spin(&w_mtx); 1763 w = witness_hash_get(description); 1764 if (w) 1765 goto found; 1766 if ((w = witness_get()) == NULL) 1767 return (NULL); 1768 MPASS(strlen(description) < MAX_W_NAME); 1769 strcpy(w->w_name, description); 1770 

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * Set file to NULL as it may point into a loadable module.
	 */
	w->w_file = NULL;
	w->w_line = 0;
	witness_increment_graph_generation();
}
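
/*
 * Witnesses are keyed by name: two locks initialized with the same name
 * and lock class share a single witness, with w_refcount counting the
 * enrolled locks and depart() handling the refcount reaching zero.  A
 * hypothetical illustration:
 */
#if 0
	struct mtx a, b;

	mtx_init(&a, "mychain", NULL, MTX_DEF);
	mtx_init(&b, "mychain", NULL, MTX_DEF);
	/* Both locks now reference one witness with w_refcount == 2. */
	MPASS(a.lock_object.lo_witness == b.lock_object.lo_witness);
#endif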

static void
adopt(struct witness *parent, struct witness *child)
{
	int pi, ci, i, j;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	/* If the relationship is already known, there's no work to be done. */
	if (isitmychild(parent, child))
		return;

	/* When the structure of the graph changes, bump up the generation. */
	witness_increment_graph_generation();

	/*
	 * The hard part ... create the direct relationship, then propagate all
	 * indirect relationships.
	 */
	pi = parent->w_index;
	ci = child->w_index;
	WITNESS_INDEX_ASSERT(pi);
	WITNESS_INDEX_ASSERT(ci);
	MPASS(pi != ci);
	w_rmatrix[pi][ci] |= WITNESS_PARENT;
	w_rmatrix[ci][pi] |= WITNESS_CHILD;

	/*
	 * If parent was not already an ancestor of child,
	 * then we increment the descendant and ancestor counters.
	 */
	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
		parent->w_num_descendants++;
		child->w_num_ancestors++;
	}

	/*
	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
	 * an ancestor of 'pi' during this loop.
	 */
	for (i = 1; i <= w_max_used_index; i++) {
		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
		    (i != pi))
			continue;

		/* Find each descendant of 'i' and mark it as a descendant. */
		for (j = 1; j <= w_max_used_index; j++) {

			/*
			 * Skip children that are already marked as
			 * descendants of 'i'.
			 */
			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
				continue;

			/*
			 * We are only interested in descendants of 'ci'. Note
			 * that 'ci' itself is counted as a descendant of 'ci'.
			 */
			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
			    (j != ci))
				continue;
			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
			w_data[i].w_num_descendants++;
			w_data[j].w_num_ancestors++;

			/*
			 * Make sure we aren't marking a node as both an
			 * ancestor and descendant. We should have caught
			 * this as a lock order reversal earlier.
			 */
			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    i, j, w_rmatrix[i][j]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
				printf("witness rmatrix paradox! [%d][%d]=%d "
				    "both ancestor and descendant\n",
				    j, i, w_rmatrix[j][i]);
				kdb_backtrace();
				printf("Witness disabled.\n");
				witness_watch = -1;
			}
		}
	}
}

static void
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (!witness_lock_type_equal(parent, child)) {
		if (witness_cold == 0)
			mtx_unlock_spin(&w_mtx);
		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
		    "the same lock type", __func__, parent->w_name,
		    parent->w_class->lc_name, child->w_name,
		    child->w_class->lc_name);
	}
	adopt(parent, child);
}
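
/*
 * The propagation loop in adopt() is an incremental transitive closure:
 * after linking parent -> child, every ancestor of the parent becomes an
 * ancestor of every descendant of the child.  A hypothetical sketch with
 * three witnesses A, B and C (names invented for illustration):
 */
#if 0
	itismychild(A, B);	/* A becomes the parent of B. */
	itismychild(B, C);	/* B becomes the parent of C; adopt() also
				 * marks A as an indirect ancestor of C. */
	MPASS(isitmydescendant(A, C));
#endif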

/*
 * Generic code for the isitmy*() functions. The rmask parameter is the
 * expected relationship of w1 to w2.
 */
static int
_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
{
	unsigned char r1, r2;
	int i1, i2;

	i1 = w1->w_index;
	i2 = w2->w_index;
	WITNESS_INDEX_ASSERT(i1);
	WITNESS_INDEX_ASSERT(i2);
	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;

	/* The flags on one better be the inverse of the flags on the other */
	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
		printf("%s: rmatrix mismatch between %s (index %d) and %s "
		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
		    "w_rmatrix[%d][%d] == %hhx\n",
		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
		    i2, i1, r2);
		kdb_backtrace();
		printf("Witness disabled.\n");
		witness_watch = -1;
	}
	return (r1 & rmask);
}

/*
 * Checks if @child is a direct child of @parent.
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{

	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
}

/*
 * Checks if @descendant is a direct or indirect descendant of @ancestor.
 */
static int
isitmydescendant(struct witness *ancestor, struct witness *descendant)
{

	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
	    __func__));
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif
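
/*
 * blessed() consults a static table of lock-name pairs whose order
 * reversals are deliberately tolerated.  A hypothetical entry (the
 * table itself, when BLESSING is defined, lives with the other static
 * data earlier in this file; the names below are invented):
 */
#if 0
static struct witness_blessed blessed_list[] = {
	{ .b_lock1 = "vnode interlock", .b_lock2 = "process lock" },
};
static int blessed_count = sizeof(blessed_list) / sizeof(blessed_list[0]);
#endif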

static struct witness *
witness_get(void)
{
	struct witness *w;
	int index;

	if (witness_cold == 0)
		mtx_assert(&w_mtx, MA_OWNED);

	if (witness_watch == -1) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("WITNESS: unable to allocate a new witness object\n");
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	index = w->w_index;
	MPASS(index > 0 && index == w_max_used_index + 1 &&
	    index < WITNESS_COUNT);
	bzero(w, sizeof(*w));
	w->w_index = index;
	if (index > w_max_used_index)
		w_max_used_index = index;
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == -1)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = -1;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

static struct lock_instance *
find_instance(struct lock_list_entry *list, const struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_object *lock;

	lock = instance->li_lock;
	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_witness->w_name != lock->lo_name)
		prnt(" (%s)", lock->lo_witness->w_name);
	prnt(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock,
	    fixup_filename(instance->li_file), instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	if (td->td_sleeplocks == NULL)
		return (0);
	return (td->td_sleeplocks->ll_count != 0);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list,
    int (*prnt)(const char *fmt, ...))
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i], prnt);
			nheld++;
		}
	return (nheld);
}
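
/*
 * Going by the format strings above, a line emitted by witness_list_lock()
 * looks roughly like this (illustrative values only):
 *
 *   exclusive sleep mutex mychain (mychain) r = 0 (0xfffff80012345678)
 *       locked @ /usr/src/sys/kern/kern_foo.c:123
 */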
2215 */ 2216 if (SCHEDULER_STOPPED()) 2217 return; 2218 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2219 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2220 return; 2221 class = LOCK_CLASS(lock); 2222 if (class->lc_flags & LC_SLEEPLOCK) 2223 lock_list = curthread->td_sleeplocks; 2224 else { 2225 if (witness_skipspin) 2226 return; 2227 lock_list = PCPU_GET(spinlocks); 2228 } 2229 instance = find_instance(lock_list, lock); 2230 if (instance == NULL) 2231 panic("%s: lock (%s) %s not locked", __func__, 2232 class->lc_name, lock->lo_name); 2233 lock->lo_witness->w_file = file; 2234 lock->lo_witness->w_line = line; 2235 instance->li_file = file; 2236 instance->li_line = line; 2237} 2238 2239void 2240witness_assert(const struct lock_object *lock, int flags, const char *file, 2241 int line) 2242{ 2243#ifdef INVARIANT_SUPPORT 2244 struct lock_instance *instance; 2245 struct lock_class *class; 2246 2247 if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL) 2248 return; 2249 class = LOCK_CLASS(lock); 2250 if ((class->lc_flags & LC_SLEEPLOCK) != 0) 2251 instance = find_instance(curthread->td_sleeplocks, lock); 2252 else if ((class->lc_flags & LC_SPINLOCK) != 0) 2253 instance = find_instance(PCPU_GET(spinlocks), lock); 2254 else { 2255 panic("Lock (%s) %s is not sleep or spin!", 2256 class->lc_name, lock->lo_name); 2257 } 2258 switch (flags) { 2259 case LA_UNLOCKED: 2260 if (instance != NULL) 2261 panic("Lock (%s) %s locked @ %s:%d.", 2262 class->lc_name, lock->lo_name, 2263 fixup_filename(file), line); 2264 break; 2265 case LA_LOCKED: 2266 case LA_LOCKED | LA_RECURSED: 2267 case LA_LOCKED | LA_NOTRECURSED: 2268 case LA_SLOCKED: 2269 case LA_SLOCKED | LA_RECURSED: 2270 case LA_SLOCKED | LA_NOTRECURSED: 2271 case LA_XLOCKED: 2272 case LA_XLOCKED | LA_RECURSED: 2273 case LA_XLOCKED | LA_NOTRECURSED: 2274 if (instance == NULL) { 2275 panic("Lock (%s) %s not locked @ %s:%d.", 2276 class->lc_name, lock->lo_name, 2277 fixup_filename(file), line); 2278 break; 2279 } 2280 if ((flags & LA_XLOCKED) != 0 && 2281 (instance->li_flags & LI_EXCLUSIVE) == 0) 2282 panic("Lock (%s) %s not exclusively locked @ %s:%d.", 2283 class->lc_name, lock->lo_name, 2284 fixup_filename(file), line); 2285 if ((flags & LA_SLOCKED) != 0 && 2286 (instance->li_flags & LI_EXCLUSIVE) != 0) 2287 panic("Lock (%s) %s exclusively locked @ %s:%d.", 2288 class->lc_name, lock->lo_name, 2289 fixup_filename(file), line); 2290 if ((flags & LA_RECURSED) != 0 && 2291 (instance->li_flags & LI_RECURSEMASK) == 0) 2292 panic("Lock (%s) %s not recursed @ %s:%d.", 2293 class->lc_name, lock->lo_name, 2294 fixup_filename(file), line); 2295 if ((flags & LA_NOTRECURSED) != 0 && 2296 (instance->li_flags & LI_RECURSEMASK) != 0) 2297 panic("Lock (%s) %s recursed @ %s:%d.", 2298 class->lc_name, lock->lo_name, 2299 fixup_filename(file), line); 2300 break; 2301 default: 2302 panic("Invalid lock assertion at %s:%d.", 2303 fixup_filename(file), line); 2304 2305 } 2306#endif /* INVARIANT_SUPPORT */ 2307} 2308 2309static void 2310witness_setflag(struct lock_object *lock, int flag, int set) 2311{ 2312 struct lock_list_entry *lock_list; 2313 struct lock_instance *instance; 2314 struct lock_class *class; 2315 2316 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2317 return; 2318 class = LOCK_CLASS(lock); 2319 if (class->lc_flags & LC_SLEEPLOCK) 2320 lock_list = curthread->td_sleeplocks; 2321 else { 2322 if (witness_skipspin) 2323 return; 2324 lock_list = PCPU_GET(spinlocks); 2325 } 2326 

void
witness_assert(const struct lock_object *lock, int flags, const char *file,
    int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.",
		    fixup_filename(file), line);

	}
#endif	/* INVARIANT_SUPPORT */
}

static void
witness_setflag(struct lock_object *lock, int flag, int set)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);

	if (set)
		instance->li_flags |= flag;
	else
		instance->li_flags &= ~flag;
}

void
witness_norelease(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 1);
}

void
witness_releaseok(struct lock_object *lock)
{

	witness_setflag(lock, LI_NORELEASE, 0);
}
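
/*
 * The NORELEASE pair brackets a region in which a held lock must not be
 * dropped; an unlock inside the region trips the "lock marked norelease"
 * panic in witness_unlock().  A hypothetical sketch, assuming an
 * initialized sx lock named mysx:
 */
#if 0
	sx_xlock(&mysx);
	witness_norelease(&mysx.lock_object);
	/* ... code that must not release mysx, even temporarily ... */
	witness_releaseok(&mysx.lock_object);
	sx_xunlock(&mysx);
#endif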

#ifdef DDB
static void
witness_ddb_list(struct thread *td)
{

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch < 1)
		return;

	witness_list_locks(&td->td_sleeplocks, db_printf);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat
	 * broken if td is currently executing on some other CPU and holds
	 * spin locks as we won't display those locks.  If we had an MI way
	 * of getting the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread and
	 * "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked the scheduler
	 * lock or stopped the other CPU to make sure it wasn't changing the
	 * list out from under us.  It is probably best to just not try to
	 * handle threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_ddb_list(td);
}

DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_ddb_list(td);
			if (db_pager_quit)
				return;
		}
	}
}
DB_SHOW_ALIAS(alllocks, db_witness_list_all)

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_ddb_display(db_printf);
}
#endif
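
/*
 * From the DDB prompt, the commands defined above are reached as, for
 * example:
 *
 *   db> show locks		(locks held by the selected thread)
 *   db> show all locks		(every thread holding a sleep lock)
 *   db> show witness		(dump of the witness order graph)
 */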

static int
sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
{
	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
	struct sbuf *sb;
	u_int w_rmatrix1, w_rmatrix2;
	int error, generation, i, j;

	tmp_data1 = NULL;
	tmp_data2 = NULL;
	tmp_w1 = NULL;
	tmp_w2 = NULL;
	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;
	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	/* Allocate and init temporary storage space. */
	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
	    M_WAITOK | M_ZERO);
	stack_zero(&tmp_data1->wlod_stack);
	stack_zero(&tmp_data2->wlod_stack);

restart:
	mtx_lock_spin(&w_mtx);
	generation = w_generation;
	mtx_unlock_spin(&w_mtx);
	sbuf_printf(sb, "Number of known direct relationships is %d\n",
	    w_lohash.wloh_count);
	/* Witness indices run from 1 to w_max_used_index, inclusive. */
	for (i = 1; i <= w_max_used_index; i++) {
		mtx_lock_spin(&w_mtx);
		if (generation != w_generation) {
			mtx_unlock_spin(&w_mtx);

			/* The graph has changed, try again. */
			req->oldidx = 0;
			sbuf_clear(sb);
			goto restart;
		}

		w1 = &w_data[i];
		if (w1->w_reversed == 0) {
			mtx_unlock_spin(&w_mtx);
			continue;
		}

		/* Copy w1 locally so we can release the spin lock. */
		*tmp_w1 = *w1;
		mtx_unlock_spin(&w_mtx);

		if (tmp_w1->w_reversed == 0)
			continue;
		for (j = 1; j <= w_max_used_index; j++) {
			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
				continue;

			mtx_lock_spin(&w_mtx);
			if (generation != w_generation) {
				mtx_unlock_spin(&w_mtx);

				/* The graph has changed, try again. */
				req->oldidx = 0;
				sbuf_clear(sb);
				goto restart;
			}

			w2 = &w_data[j];
			data1 = witness_lock_order_get(w1, w2);
			data2 = witness_lock_order_get(w2, w1);

			/*
			 * Copy information locally so we can release the
			 * spin lock.
			 */
			*tmp_w2 = *w2;
			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];

			if (data1) {
				stack_zero(&tmp_data1->wlod_stack);
				stack_copy(&data1->wlod_stack,
				    &tmp_data1->wlod_stack);
			}
			if (data2 && data2 != data1) {
				stack_zero(&tmp_data2->wlod_stack);
				stack_copy(&data2->wlod_stack,
				    &tmp_data2->wlod_stack);
			}
			mtx_unlock_spin(&w_mtx);

			sbuf_printf(sb,
	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
#if 0
			sbuf_printf(sb,
			    "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
#endif
			if (data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
				sbuf_printf(sb, "\n");
			}
			if (data2 && data2 != data1) {
				sbuf_printf(sb,
			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
				sbuf_printf(sb, "\n");
			}
		}
	}
	mtx_lock_spin(&w_mtx);
	if (generation != w_generation) {
		mtx_unlock_spin(&w_mtx);

		/*
		 * The graph changed while we were printing stack data,
		 * try again.
		 */
		req->oldidx = 0;
		sbuf_clear(sb);
		goto restart;
	}
	mtx_unlock_spin(&w_mtx);

	/* Free temporary storage space. */
	free(tmp_data1, M_TEMP);
	free(tmp_data2, M_TEMP);
	free(tmp_w1, M_TEMP);
	free(tmp_w2, M_TEMP);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);

	return (error);
}
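
/*
 * This handler is wired up to a sysctl node elsewhere in this file;
 * assuming the conventional naming, the report is retrieved from
 * userland with:
 *
 *   # sysctl debug.witness.badstacks
 */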

static int
sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
{
	struct witness *w;
	struct sbuf *sb;
	int error;

	if (witness_watch < 1) {
		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
		return (error);
	}
	if (witness_cold) {
		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
		return (error);
	}
	error = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);
	sbuf_printf(sb, "\n");

	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;
	STAILQ_FOREACH(w, &w_all, w_list)
		witness_add_fullgraph(sb, w);
	mtx_unlock_spin(&w_mtx);

	/*
	 * Close the sbuf and return to userland.
	 */
	error = sbuf_finish(sb);
	sbuf_delete(sb);

	return (error);
}

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value > 1 || value < -1 ||
	    (witness_watch == -1 && value != witness_watch))
		return (EINVAL);
	witness_watch = value;
	return (0);
}

static void
witness_add_fullgraph(struct sbuf *sb, struct witness *w)
{
	int i;

	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
		return;
	w->w_displayed = 1;

	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
			    w_data[i].w_name);
			witness_add_fullgraph(sb, &w_data[i]);
		}
	}
}

/*
 * A simple hash function.  Takes a key pointer and a key size.  If size == 0,
 * interprets the key as a string and reads until the null terminator.
 * Otherwise, reads the first size bytes.  Returns an unsigned 32-bit hash
 * value computed from the key.
 */
static uint32_t
witness_hash_djb2(const uint8_t *key, uint32_t size)
{
	unsigned int hash = 5381;
	int i;

	/* hash = hash * 33 + key[i] */
	if (size)
		for (i = 0; i < size; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];
	else
		for (i = 0; key[i] != 0; i++)
			hash = ((hash << 5) + hash) + (unsigned int)key[i];

	return (hash);
}
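
/*
 * The multiply-by-33 above is the classic djb2 scheme:
 * (hash << 5) + hash == hash * 33.  An illustrative use, hashing a
 * witness name into the per-name table as the functions below do:
 */
#if 0
	uint32_t slot;

	slot = witness_hash_djb2("Giant", 0) % w_hash.wh_size;
	/* w_hash.wh_array[slot] heads the chain that may hold "Giant". */
#endif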
2789 */ 2790static int 2791witness_lock_order_check(struct witness *parent, struct witness *child) 2792{ 2793 2794 if (parent != child && 2795 w_rmatrix[parent->w_index][child->w_index] 2796 & WITNESS_LOCK_ORDER_KNOWN && 2797 isitmychild(parent, child)) 2798 return (1); 2799 2800 return (0); 2801} 2802 2803static int 2804witness_lock_order_add(struct witness *parent, struct witness *child) 2805{ 2806 struct witness_lock_order_data *data = NULL; 2807 struct witness_lock_order_key key; 2808 unsigned int hash; 2809 2810 MPASS(parent != NULL && child != NULL); 2811 key.from = parent->w_index; 2812 key.to = child->w_index; 2813 WITNESS_INDEX_ASSERT(key.from); 2814 WITNESS_INDEX_ASSERT(key.to); 2815 if (w_rmatrix[parent->w_index][child->w_index] 2816 & WITNESS_LOCK_ORDER_KNOWN) 2817 return (1); 2818 2819 hash = witness_hash_djb2((const char*)&key, 2820 sizeof(key)) % w_lohash.wloh_size; 2821 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN; 2822 data = w_lofree; 2823 if (data == NULL) 2824 return (0); 2825 w_lofree = data->wlod_next; 2826 data->wlod_next = w_lohash.wloh_array[hash]; 2827 data->wlod_key = key; 2828 w_lohash.wloh_array[hash] = data; 2829 w_lohash.wloh_count++; 2830 stack_zero(&data->wlod_stack); 2831 stack_save(&data->wlod_stack); 2832 return (1); 2833} 2834 2835/* Call this whenver the structure of the witness graph changes. */ 2836static void 2837witness_increment_graph_generation(void) 2838{ 2839 2840 if (witness_cold == 0) 2841 mtx_assert(&w_mtx, MA_OWNED); 2842 w_generation++; 2843} 2844 2845#ifdef KDB 2846static void 2847_witness_debugger(int cond, const char *msg) 2848{ 2849 2850 if (witness_trace && cond) 2851 kdb_backtrace(); 2852 if (witness_kdb && cond) 2853 kdb_enter(KDB_WHY_WITNESS, msg); 2854} 2855#endif 2856