subr_witness.c revision 226294
/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 226294 2011-10-12 09:21:02Z adrian $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	768

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED	0x00	/* No lock order relation. */
#define	WITNESS_PARENT		0x01	/* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR	0x02	/* Direct or indirect ancestor. */
#define	WITNESS_CHILD		0x04	/* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT	0x08	/* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK	(WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK	(WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK	\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL	0x10	/* A lock order reversal has been
					 * observed. */
#define	WITNESS_RESERVED1	0x20	/* Unused flag, reserved.
*/ 175#define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */ 176#define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */ 177 178/* Descendant to ancestor flags */ 179#define WITNESS_DTOA(x) (((x) & WITNESS_RELATED_MASK) >> 2) 180 181/* Ancestor to descendant flags */ 182#define WITNESS_ATOD(x) (((x) & WITNESS_RELATED_MASK) << 2) 183 184#define WITNESS_INDEX_ASSERT(i) \ 185 MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT) 186 187MALLOC_DEFINE(M_WITNESS, "Witness", "Witness"); 188 189/* 190 * Lock instances. A lock instance is the data associated with a lock while 191 * it is held by witness. For example, a lock instance will hold the 192 * recursion count of a lock. Lock instances are held in lists. Spin locks 193 * are held in a per-cpu list while sleep locks are held in per-thread list. 194 */ 195struct lock_instance { 196 struct lock_object *li_lock; 197 const char *li_file; 198 int li_line; 199 u_int li_flags; 200}; 201 202/* 203 * A simple list type used to build the list of locks held by a thread 204 * or CPU. We can't simply embed the list in struct lock_object since a 205 * lock may be held by more than one thread if it is a shared lock. Locks 206 * are added to the head of the list, so we fill up each list entry from 207 * "the back" logically. To ease some of the arithmetic, we actually fill 208 * in each list entry the normal way (children[0] then children[1], etc.) but 209 * when we traverse the list we read children[count-1] as the first entry 210 * down to children[0] as the final entry. 211 */ 212struct lock_list_entry { 213 struct lock_list_entry *ll_next; 214 struct lock_instance ll_children[LOCK_NCHILDREN]; 215 u_int ll_count; 216}; 217 218/* 219 * The main witness structure. One of these per named lock type in the system 220 * (for example, "vnode interlock"). 221 */ 222struct witness { 223 char w_name[MAX_W_NAME]; 224 uint32_t w_index; /* Index in the relationship matrix */ 225 struct lock_class *w_class; 226 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */ 227 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */ 228 struct witness *w_hash_next; /* Linked list in hash buckets. */ 229 const char *w_file; /* File where last acquired */ 230 uint32_t w_line; /* Line where last acquired */ 231 uint32_t w_refcount; 232 uint16_t w_num_ancestors; /* direct/indirect 233 * ancestor count */ 234 uint16_t w_num_descendants; /* direct/indirect 235 * descendant count */ 236 int16_t w_ddb_level; 237 unsigned w_displayed:1; 238 unsigned w_reversed:1; 239}; 240 241STAILQ_HEAD(witness_list, witness); 242 243/* 244 * The witness hash table. Keys are witness names (const char *), elements are 245 * witness objects (struct witness *). 246 */ 247struct witness_hash { 248 struct witness *wh_array[WITNESS_HASH_SIZE]; 249 uint32_t wh_size; 250 uint32_t wh_count; 251}; 252 253/* 254 * Key type for the lock order data hash table. 255 */ 256struct witness_lock_order_key { 257 uint16_t from; 258 uint16_t to; 259}; 260 261struct witness_lock_order_data { 262 struct stack wlod_stack; 263 struct witness_lock_order_key wlod_key; 264 struct witness_lock_order_data *wlod_next; 265}; 266 267/* 268 * The witness lock order data hash table. Keys are witness index tuples 269 * (struct witness_lock_order_key), elements are lock order data objects 270 * (struct witness_lock_order_data). 
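 * A key is simply the pair of w_index values (wlod_key.from, wlod_key.to)
 * of the two witnesses whose acquisition order was observed.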
271 */ 272struct witness_lock_order_hash { 273 struct witness_lock_order_data *wloh_array[WITNESS_LO_HASH_SIZE]; 274 u_int wloh_size; 275 u_int wloh_count; 276}; 277 278#ifdef BLESSING 279struct witness_blessed { 280 const char *b_lock1; 281 const char *b_lock2; 282}; 283#endif 284 285struct witness_pendhelp { 286 const char *wh_type; 287 struct lock_object *wh_lock; 288}; 289 290struct witness_order_list_entry { 291 const char *w_name; 292 struct lock_class *w_class; 293}; 294 295/* 296 * Returns 0 if one of the locks is a spin lock and the other is not. 297 * Returns 1 otherwise. 298 */ 299static __inline int 300witness_lock_type_equal(struct witness *w1, struct witness *w2) 301{ 302 303 return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) == 304 (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK))); 305} 306 307static __inline int 308witness_lock_order_key_empty(const struct witness_lock_order_key *key) 309{ 310 311 return (key->from == 0 && key->to == 0); 312} 313 314static __inline int 315witness_lock_order_key_equal(const struct witness_lock_order_key *a, 316 const struct witness_lock_order_key *b) 317{ 318 319 return (a->from == b->from && a->to == b->to); 320} 321 322static int _isitmyx(struct witness *w1, struct witness *w2, int rmask, 323 const char *fname); 324#ifdef KDB 325static void _witness_debugger(int cond, const char *msg); 326#endif 327static void adopt(struct witness *parent, struct witness *child); 328#ifdef BLESSING 329static int blessed(struct witness *, struct witness *); 330#endif 331static void depart(struct witness *w); 332static struct witness *enroll(const char *description, 333 struct lock_class *lock_class); 334static struct lock_instance *find_instance(struct lock_list_entry *list, 335 struct lock_object *lock); 336static int isitmychild(struct witness *parent, struct witness *child); 337static int isitmydescendant(struct witness *parent, struct witness *child); 338static void itismychild(struct witness *parent, struct witness *child); 339static int sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS); 340static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS); 341static int sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS); 342static void witness_add_fullgraph(struct sbuf *sb, struct witness *parent); 343#ifdef DDB 344static void witness_ddb_compute_levels(void); 345static void witness_ddb_display(int(*)(const char *fmt, ...)); 346static void witness_ddb_display_descendants(int(*)(const char *fmt, ...), 347 struct witness *, int indent); 348static void witness_ddb_display_list(int(*prnt)(const char *fmt, ...), 349 struct witness_list *list); 350static void witness_ddb_level_descendants(struct witness *parent, int l); 351static void witness_ddb_list(struct thread *td); 352#endif 353static void witness_free(struct witness *m); 354static struct witness *witness_get(void); 355static uint32_t witness_hash_djb2(const uint8_t *key, uint32_t size); 356static struct witness *witness_hash_get(const char *key); 357static void witness_hash_put(struct witness *w); 358static void witness_init_hash_tables(void); 359static void witness_increment_graph_generation(void); 360static void witness_lock_list_free(struct lock_list_entry *lle); 361static struct lock_list_entry *witness_lock_list_get(void); 362static int witness_lock_order_add(struct witness *parent, 363 struct witness *child); 364static int witness_lock_order_check(struct witness *parent, 365 struct witness *child); 366static struct witness_lock_order_data *witness_lock_order_get( 367 struct witness 
*parent, 368 struct witness *child); 369static void witness_list_lock(struct lock_instance *instance, 370 int (*prnt)(const char *fmt, ...)); 371static void witness_setflag(struct lock_object *lock, int flag, int set); 372 373#ifdef KDB 374#define witness_debugger(c) _witness_debugger(c, __func__) 375#else 376#define witness_debugger(c) 377#endif 378 379SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking"); 380 381/* 382 * If set to 0, lock order checking is disabled. If set to -1, 383 * witness is completely disabled. Otherwise witness performs full 384 * lock order checking for all locks. At runtime, lock order checking 385 * may be toggled. However, witness cannot be reenabled once it is 386 * completely disabled. 387 */ 388static int witness_watch = 1; 389TUNABLE_INT("debug.witness.watch", &witness_watch); 390SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0, 391 sysctl_debug_witness_watch, "I", "witness is watching lock operations"); 392 393#ifdef KDB 394/* 395 * When KDB is enabled and witness_kdb is 1, it will cause the system 396 * to drop into kdebug() when: 397 * - a lock hierarchy violation occurs 398 * - locks are held when going to sleep. 399 */ 400#ifdef WITNESS_KDB 401int witness_kdb = 1; 402#else 403int witness_kdb = 0; 404#endif 405TUNABLE_INT("debug.witness.kdb", &witness_kdb); 406SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, ""); 407 408/* 409 * When KDB is enabled and witness_trace is 1, it will cause the system 410 * to print a stack trace: 411 * - a lock hierarchy violation occurs 412 * - locks are held when going to sleep. 413 */ 414int witness_trace = 1; 415TUNABLE_INT("debug.witness.trace", &witness_trace); 416SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, ""); 417#endif /* KDB */ 418 419#ifdef WITNESS_SKIPSPIN 420int witness_skipspin = 1; 421#else 422int witness_skipspin = 0; 423#endif 424TUNABLE_INT("debug.witness.skipspin", &witness_skipspin); 425SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 426 0, ""); 427 428/* 429 * Call this to print out the relations between locks. 430 */ 431SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD, 432 NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs"); 433 434/* 435 * Call this to print out the witness faulty stacks. 
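 * For example, from userland: sysctl debug.witness.badstacks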
436 */ 437SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD, 438 NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks"); 439 440static struct mtx w_mtx; 441 442/* w_list */ 443static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free); 444static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all); 445 446/* w_typelist */ 447static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin); 448static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep); 449 450/* lock list */ 451static struct lock_list_entry *w_lock_list_free = NULL; 452static struct witness_pendhelp pending_locks[WITNESS_PENDLIST]; 453static u_int pending_cnt; 454 455static int w_free_cnt, w_spin_cnt, w_sleep_cnt; 456SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, ""); 457SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, ""); 458SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0, 459 ""); 460 461static struct witness *w_data; 462static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1]; 463static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT]; 464static struct witness_hash w_hash; /* The witness hash table. */ 465 466/* The lock order data hash */ 467static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT]; 468static struct witness_lock_order_data *w_lofree = NULL; 469static struct witness_lock_order_hash w_lohash; 470static int w_max_used_index = 0; 471static unsigned int w_generation = 0; 472static const char w_notrunning[] = "Witness not running\n"; 473static const char w_stillcold[] = "Witness is still cold\n"; 474 475 476static struct witness_order_list_entry order_lists[] = { 477 /* 478 * sx locks 479 */ 480 { "proctree", &lock_class_sx }, 481 { "allproc", &lock_class_sx }, 482 { "allprison", &lock_class_sx }, 483 { NULL, NULL }, 484 /* 485 * Various mutexes 486 */ 487 { "Giant", &lock_class_mtx_sleep }, 488 { "pipe mutex", &lock_class_mtx_sleep }, 489 { "sigio lock", &lock_class_mtx_sleep }, 490 { "process group", &lock_class_mtx_sleep }, 491 { "process lock", &lock_class_mtx_sleep }, 492 { "session", &lock_class_mtx_sleep }, 493 { "uidinfo hash", &lock_class_rw }, 494#ifdef HWPMC_HOOKS 495 { "pmc-sleep", &lock_class_mtx_sleep }, 496#endif 497 { "time lock", &lock_class_mtx_sleep }, 498 { NULL, NULL }, 499 /* 500 * Sockets 501 */ 502 { "accept", &lock_class_mtx_sleep }, 503 { "so_snd", &lock_class_mtx_sleep }, 504 { "so_rcv", &lock_class_mtx_sleep }, 505 { "sellck", &lock_class_mtx_sleep }, 506 { NULL, NULL }, 507 /* 508 * Routing 509 */ 510 { "so_rcv", &lock_class_mtx_sleep }, 511 { "radix node head", &lock_class_rw }, 512 { "rtentry", &lock_class_mtx_sleep }, 513 { "ifaddr", &lock_class_mtx_sleep }, 514 { NULL, NULL }, 515 /* 516 * IPv4 multicast: 517 * protocol locks before interface locks, after UDP locks. 518 */ 519 { "udpinp", &lock_class_rw }, 520 { "in_multi_mtx", &lock_class_mtx_sleep }, 521 { "igmp_mtx", &lock_class_mtx_sleep }, 522 { "if_addr_mtx", &lock_class_mtx_sleep }, 523 { NULL, NULL }, 524 /* 525 * IPv6 multicast: 526 * protocol locks before interface locks, after UDP locks. 
527 */ 528 { "udpinp", &lock_class_rw }, 529 { "in6_multi_mtx", &lock_class_mtx_sleep }, 530 { "mld_mtx", &lock_class_mtx_sleep }, 531 { "if_addr_mtx", &lock_class_mtx_sleep }, 532 { NULL, NULL }, 533 /* 534 * UNIX Domain Sockets 535 */ 536 { "unp_global_rwlock", &lock_class_rw }, 537 { "unp_list_lock", &lock_class_mtx_sleep }, 538 { "unp", &lock_class_mtx_sleep }, 539 { "so_snd", &lock_class_mtx_sleep }, 540 { NULL, NULL }, 541 /* 542 * UDP/IP 543 */ 544 { "udp", &lock_class_rw }, 545 { "udpinp", &lock_class_rw }, 546 { "so_snd", &lock_class_mtx_sleep }, 547 { NULL, NULL }, 548 /* 549 * TCP/IP 550 */ 551 { "tcp", &lock_class_rw }, 552 { "tcpinp", &lock_class_rw }, 553 { "so_snd", &lock_class_mtx_sleep }, 554 { NULL, NULL }, 555 /* 556 * netatalk 557 */ 558 { "ddp_list_mtx", &lock_class_mtx_sleep }, 559 { "ddp_mtx", &lock_class_mtx_sleep }, 560 { NULL, NULL }, 561 /* 562 * BPF 563 */ 564 { "bpf global lock", &lock_class_mtx_sleep }, 565 { "bpf interface lock", &lock_class_mtx_sleep }, 566 { "bpf cdev lock", &lock_class_mtx_sleep }, 567 { NULL, NULL }, 568 /* 569 * NFS server 570 */ 571 { "nfsd_mtx", &lock_class_mtx_sleep }, 572 { "so_snd", &lock_class_mtx_sleep }, 573 { NULL, NULL }, 574 575 /* 576 * IEEE 802.11 577 */ 578 { "802.11 com lock", &lock_class_mtx_sleep}, 579 { NULL, NULL }, 580 /* 581 * Network drivers 582 */ 583 { "network driver", &lock_class_mtx_sleep}, 584 { NULL, NULL }, 585 586 /* 587 * Netgraph 588 */ 589 { "ng_node", &lock_class_mtx_sleep }, 590 { "ng_worklist", &lock_class_mtx_sleep }, 591 { NULL, NULL }, 592 /* 593 * CDEV 594 */ 595 { "system map", &lock_class_mtx_sleep }, 596 { "vm page queue mutex", &lock_class_mtx_sleep }, 597 { "vnode interlock", &lock_class_mtx_sleep }, 598 { "cdev", &lock_class_mtx_sleep }, 599 { NULL, NULL }, 600 /* 601 * VM 602 * 603 */ 604 { "vm object", &lock_class_mtx_sleep }, 605 { "page lock", &lock_class_mtx_sleep }, 606 { "vm page queue mutex", &lock_class_mtx_sleep }, 607 { "pmap", &lock_class_mtx_sleep }, 608 { NULL, NULL }, 609 /* 610 * kqueue/VFS interaction 611 */ 612 { "kqueue", &lock_class_mtx_sleep }, 613 { "struct mount mtx", &lock_class_mtx_sleep }, 614 { "vnode interlock", &lock_class_mtx_sleep }, 615 { NULL, NULL }, 616 /* 617 * ZFS locking 618 */ 619 { "dn->dn_mtx", &lock_class_sx }, 620 { "dr->dt.di.dr_mtx", &lock_class_sx }, 621 { "db->db_mtx", &lock_class_sx }, 622 { NULL, NULL }, 623 /* 624 * spin locks 625 */ 626#ifdef SMP 627 { "ap boot", &lock_class_mtx_spin }, 628#endif 629 { "rm.mutex_mtx", &lock_class_mtx_spin }, 630 { "sio", &lock_class_mtx_spin }, 631 { "scrlock", &lock_class_mtx_spin }, 632#ifdef __i386__ 633 { "cy", &lock_class_mtx_spin }, 634#endif 635#ifdef __sparc64__ 636 { "pcib_mtx", &lock_class_mtx_spin }, 637 { "rtc_mtx", &lock_class_mtx_spin }, 638#endif 639 { "scc_hwmtx", &lock_class_mtx_spin }, 640 { "uart_hwmtx", &lock_class_mtx_spin }, 641 { "fast_taskqueue", &lock_class_mtx_spin }, 642 { "intr table", &lock_class_mtx_spin }, 643#ifdef HWPMC_HOOKS 644 { "pmc-per-proc", &lock_class_mtx_spin }, 645#endif 646 { "process slock", &lock_class_mtx_spin }, 647 { "sleepq chain", &lock_class_mtx_spin }, 648 { "umtx lock", &lock_class_mtx_spin }, 649 { "rm_spinlock", &lock_class_mtx_spin }, 650 { "turnstile chain", &lock_class_mtx_spin }, 651 { "turnstile lock", &lock_class_mtx_spin }, 652 { "sched lock", &lock_class_mtx_spin }, 653 { "td_contested", &lock_class_mtx_spin }, 654 { "callout", &lock_class_mtx_spin }, 655 { "entropy harvest mutex", &lock_class_mtx_spin }, 656 { "syscons video lock", 
&lock_class_mtx_spin }, 657#ifdef SMP 658 { "smp rendezvous", &lock_class_mtx_spin }, 659#endif 660#ifdef __powerpc__ 661 { "tlb0", &lock_class_mtx_spin }, 662#endif 663 /* 664 * leaf locks 665 */ 666 { "intrcnt", &lock_class_mtx_spin }, 667 { "icu", &lock_class_mtx_spin }, 668#if defined(SMP) && defined(__sparc64__) 669 { "ipi", &lock_class_mtx_spin }, 670#endif 671#ifdef __i386__ 672 { "allpmaps", &lock_class_mtx_spin }, 673 { "descriptor tables", &lock_class_mtx_spin }, 674#endif 675 { "clk", &lock_class_mtx_spin }, 676 { "cpuset", &lock_class_mtx_spin }, 677 { "mprof lock", &lock_class_mtx_spin }, 678 { "zombie lock", &lock_class_mtx_spin }, 679 { "ALD Queue", &lock_class_mtx_spin }, 680#ifdef __ia64__ 681 { "MCA spin lock", &lock_class_mtx_spin }, 682#endif 683#if defined(__i386__) || defined(__amd64__) 684 { "pcicfg", &lock_class_mtx_spin }, 685 { "NDIS thread lock", &lock_class_mtx_spin }, 686#endif 687 { "tw_osl_io_lock", &lock_class_mtx_spin }, 688 { "tw_osl_q_lock", &lock_class_mtx_spin }, 689 { "tw_cl_io_lock", &lock_class_mtx_spin }, 690 { "tw_cl_intr_lock", &lock_class_mtx_spin }, 691 { "tw_cl_gen_lock", &lock_class_mtx_spin }, 692#ifdef HWPMC_HOOKS 693 { "pmc-leaf", &lock_class_mtx_spin }, 694#endif 695 { "blocked lock", &lock_class_mtx_spin }, 696 { NULL, NULL }, 697 { NULL, NULL } 698}; 699 700#ifdef BLESSING 701/* 702 * Pairs of locks which have been blessed 703 * Don't complain about order problems with blessed locks 704 */ 705static struct witness_blessed blessed_list[] = { 706}; 707static int blessed_count = 708 sizeof(blessed_list) / sizeof(struct witness_blessed); 709#endif 710 711/* 712 * This global is set to 0 once it becomes safe to use the witness code. 713 */ 714static int witness_cold = 1; 715 716/* 717 * This global is set to 1 once the static lock orders have been enrolled 718 * so that a warning can be issued for any spin locks enrolled later. 719 */ 720static int witness_spin_warn = 0; 721 722/* 723 * The WITNESS-enabled diagnostic code. Note that the witness code does 724 * assume that the early boot is single-threaded at least until after this 725 * routine is completed. 726 */ 727static void 728witness_initialize(void *dummy __unused) 729{ 730 struct lock_object *lock; 731 struct witness_order_list_entry *order; 732 struct witness *w, *w1; 733 int i; 734 735 w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS, 736 M_NOWAIT | M_ZERO); 737 738 /* 739 * We have to release Giant before initializing its witness 740 * structure so that WITNESS doesn't get confused. 741 */ 742 mtx_unlock(&Giant); 743 mtx_assert(&Giant, MA_NOTOWNED); 744 745 CTR1(KTR_WITNESS, "%s: initializing witness", __func__); 746 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET | 747 MTX_NOWITNESS | MTX_NOPROFILE); 748 for (i = WITNESS_COUNT - 1; i >= 0; i--) { 749 w = &w_data[i]; 750 memset(w, 0, sizeof(*w)); 751 w_data[i].w_index = i; /* Witness index never changes. */ 752 witness_free(w); 753 } 754 KASSERT(STAILQ_FIRST(&w_free)->w_index == 0, 755 ("%s: Invalid list of free witness objects", __func__)); 756 757 /* Witness with index 0 is not used to aid in debugging. */ 758 STAILQ_REMOVE_HEAD(&w_free, w_list); 759 w_free_cnt--; 760 761 memset(w_rmatrix, 0, 762 (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1))); 763 764 for (i = 0; i < LOCK_CHILDCOUNT; i++) 765 witness_lock_list_free(&w_locklistdata[i]); 766 witness_init_hash_tables(); 767 768 /* First add in all the specified order lists. 
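	 * Each NULL-terminated group in order_lists is enrolled in sequence
	 * and every entry is made a child of the entry before it via
	 * itismychild(); for example, the first sx group establishes the
	 * order "proctree" before "allproc" before "allprison".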
*/ 769 for (order = order_lists; order->w_name != NULL; order++) { 770 w = enroll(order->w_name, order->w_class); 771 if (w == NULL) 772 continue; 773 w->w_file = "order list"; 774 for (order++; order->w_name != NULL; order++) { 775 w1 = enroll(order->w_name, order->w_class); 776 if (w1 == NULL) 777 continue; 778 w1->w_file = "order list"; 779 itismychild(w, w1); 780 w = w1; 781 } 782 } 783 witness_spin_warn = 1; 784 785 /* Iterate through all locks and add them to witness. */ 786 for (i = 0; pending_locks[i].wh_lock != NULL; i++) { 787 lock = pending_locks[i].wh_lock; 788 KASSERT(lock->lo_flags & LO_WITNESS, 789 ("%s: lock %s is on pending list but not LO_WITNESS", 790 __func__, lock->lo_name)); 791 lock->lo_witness = enroll(pending_locks[i].wh_type, 792 LOCK_CLASS(lock)); 793 } 794 795 /* Mark the witness code as being ready for use. */ 796 witness_cold = 0; 797 798 mtx_lock(&Giant); 799} 800SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, 801 NULL); 802 803void 804witness_init(struct lock_object *lock, const char *type) 805{ 806 struct lock_class *class; 807 808 /* Various sanity checks. */ 809 class = LOCK_CLASS(lock); 810 if ((lock->lo_flags & LO_RECURSABLE) != 0 && 811 (class->lc_flags & LC_RECURSABLE) == 0) 812 panic("%s: lock (%s) %s can not be recursable", __func__, 813 class->lc_name, lock->lo_name); 814 if ((lock->lo_flags & LO_SLEEPABLE) != 0 && 815 (class->lc_flags & LC_SLEEPABLE) == 0) 816 panic("%s: lock (%s) %s can not be sleepable", __func__, 817 class->lc_name, lock->lo_name); 818 if ((lock->lo_flags & LO_UPGRADABLE) != 0 && 819 (class->lc_flags & LC_UPGRADABLE) == 0) 820 panic("%s: lock (%s) %s can not be upgradable", __func__, 821 class->lc_name, lock->lo_name); 822 823 /* 824 * If we shouldn't watch this lock, then just clear lo_witness. 825 * Otherwise, if witness_cold is set, then it is too early to 826 * enroll this lock, so defer it to witness_initialize() by adding 827 * it to the pending_locks list. If it is not too early, then enroll 828 * the lock now. 829 */ 830 if (witness_watch < 1 || panicstr != NULL || 831 (lock->lo_flags & LO_WITNESS) == 0) 832 lock->lo_witness = NULL; 833 else if (witness_cold) { 834 pending_locks[pending_cnt].wh_lock = lock; 835 pending_locks[pending_cnt++].wh_type = type; 836 if (pending_cnt > WITNESS_PENDLIST) 837 panic("%s: pending locks list is too small, bump it\n", 838 __func__); 839 } else 840 lock->lo_witness = enroll(type, class); 841} 842 843void 844witness_destroy(struct lock_object *lock) 845{ 846 struct lock_class *class; 847 struct witness *w; 848 849 class = LOCK_CLASS(lock); 850 851 if (witness_cold) 852 panic("lock (%s) %s destroyed while witness_cold", 853 class->lc_name, lock->lo_name); 854 855 /* XXX: need to verify that no one holds the lock */ 856 if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL) 857 return; 858 w = lock->lo_witness; 859 860 mtx_lock_spin(&w_mtx); 861 MPASS(w->w_refcount > 0); 862 w->w_refcount--; 863 864 if (w->w_refcount == 0) 865 depart(w); 866 mtx_unlock_spin(&w_mtx); 867} 868 869#ifdef DDB 870static void 871witness_ddb_compute_levels(void) 872{ 873 struct witness *w; 874 875 /* 876 * First clear all levels. 877 */ 878 STAILQ_FOREACH(w, &w_all, w_list) 879 w->w_ddb_level = -1; 880 881 /* 882 * Look for locks with no parents and level all their descendants. 883 */ 884 STAILQ_FOREACH(w, &w_all, w_list) { 885 886 /* If the witness has ancestors (is not a root), skip it. 
		 */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	    w->w_name, w->w_class->lc_name,
	    w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", w->w_file,
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
1023 */ 1024 if (witness_watch && 1025 isitmydescendant(lock2->lo_witness, lock1->lo_witness)) { 1026 mtx_unlock_spin(&w_mtx); 1027 return (EDOOFUS); 1028 } 1029 1030 /* Try to add the new order. */ 1031 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 1032 lock2->lo_witness->w_name, lock1->lo_witness->w_name); 1033 itismychild(lock1->lo_witness, lock2->lo_witness); 1034 mtx_unlock_spin(&w_mtx); 1035 return (0); 1036} 1037 1038void 1039witness_checkorder(struct lock_object *lock, int flags, const char *file, 1040 int line, struct lock_object *interlock) 1041{ 1042 struct lock_list_entry *lock_list, *lle; 1043 struct lock_instance *lock1, *lock2, *plock; 1044 struct lock_class *class; 1045 struct witness *w, *w1; 1046 struct thread *td; 1047 int i, j; 1048 1049 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL || 1050 panicstr != NULL) 1051 return; 1052 1053 w = lock->lo_witness; 1054 class = LOCK_CLASS(lock); 1055 td = curthread; 1056 1057 if (class->lc_flags & LC_SLEEPLOCK) { 1058 1059 /* 1060 * Since spin locks include a critical section, this check 1061 * implicitly enforces a lock order of all sleep locks before 1062 * all spin locks. 1063 */ 1064 if (td->td_critnest != 0 && !kdb_active) 1065 panic("blockable sleep lock (%s) %s @ %s:%d", 1066 class->lc_name, lock->lo_name, 1067 fixup_filename(file), line); 1068 1069 /* 1070 * If this is the first lock acquired then just return as 1071 * no order checking is needed. 1072 */ 1073 lock_list = td->td_sleeplocks; 1074 if (lock_list == NULL || lock_list->ll_count == 0) 1075 return; 1076 } else { 1077 1078 /* 1079 * If this is the first lock, just return as no order 1080 * checking is needed. Avoid problems with thread 1081 * migration pinning the thread while checking if 1082 * spinlocks are held. If at least one spinlock is held 1083 * the thread is in a safe path and it is allowed to 1084 * unpin it. 1085 */ 1086 sched_pin(); 1087 lock_list = PCPU_GET(spinlocks); 1088 if (lock_list == NULL || lock_list->ll_count == 0) { 1089 sched_unpin(); 1090 return; 1091 } 1092 sched_unpin(); 1093 } 1094 1095 /* 1096 * Check to see if we are recursing on a lock we already own. If 1097 * so, make sure that we don't mismatch exclusive and shared lock 1098 * acquires. 1099 */ 1100 lock1 = find_instance(lock_list, lock); 1101 if (lock1 != NULL) { 1102 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 && 1103 (flags & LOP_EXCLUSIVE) == 0) { 1104 printf("shared lock of (%s) %s @ %s:%d\n", 1105 class->lc_name, lock->lo_name, 1106 fixup_filename(file), line); 1107 printf("while exclusively locked from %s:%d\n", 1108 fixup_filename(lock1->li_file), lock1->li_line); 1109 panic("share->excl"); 1110 } 1111 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 && 1112 (flags & LOP_EXCLUSIVE) != 0) { 1113 printf("exclusive lock of (%s) %s @ %s:%d\n", 1114 class->lc_name, lock->lo_name, 1115 fixup_filename(file), line); 1116 printf("while share locked from %s:%d\n", 1117 fixup_filename(lock1->li_file), lock1->li_line); 1118 panic("excl->share"); 1119 } 1120 return; 1121 } 1122 1123 /* 1124 * Find the previously acquired lock, but ignore interlocks. 1125 */ 1126 plock = &lock_list->ll_children[lock_list->ll_count - 1]; 1127 if (interlock != NULL && plock->li_lock == interlock) { 1128 if (lock_list->ll_count > 1) 1129 plock = 1130 &lock_list->ll_children[lock_list->ll_count - 2]; 1131 else { 1132 lle = lock_list->ll_next; 1133 1134 /* 1135 * The interlock is the only lock we hold, so 1136 * simply return. 
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
1257 */ 1258#ifdef BLESSING 1259 1260 /* 1261 * If the lock order is blessed, just bail. We don't 1262 * look for other lock order violations though, which 1263 * may be a bug. 1264 */ 1265 if (blessed(w, w1)) 1266 goto out; 1267#endif 1268 1269 /* Bail if this violation is known */ 1270 if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL) 1271 goto out; 1272 1273 /* Record this as a violation */ 1274 w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL; 1275 w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL; 1276 w->w_reversed = w1->w_reversed = 1; 1277 witness_increment_graph_generation(); 1278 mtx_unlock_spin(&w_mtx); 1279 1280 /* 1281 * Ok, yell about it. 1282 */ 1283 if (((lock->lo_flags & LO_SLEEPABLE) != 0 && 1284 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0)) 1285 printf( 1286 "lock order reversal: (sleepable after non-sleepable)\n"); 1287 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 1288 && lock == &Giant.lock_object) 1289 printf( 1290 "lock order reversal: (Giant after non-sleepable)\n"); 1291 else 1292 printf("lock order reversal:\n"); 1293 1294 /* 1295 * Try to locate an earlier lock with 1296 * witness w in our list. 1297 */ 1298 do { 1299 lock2 = &lle->ll_children[i]; 1300 MPASS(lock2->li_lock != NULL); 1301 if (lock2->li_lock->lo_witness == w) 1302 break; 1303 if (i == 0 && lle->ll_next != NULL) { 1304 lle = lle->ll_next; 1305 i = lle->ll_count - 1; 1306 MPASS(i >= 0 && i < LOCK_NCHILDREN); 1307 } else 1308 i--; 1309 } while (i >= 0); 1310 if (i < 0) { 1311 printf(" 1st %p %s (%s) @ %s:%d\n", 1312 lock1->li_lock, lock1->li_lock->lo_name, 1313 w1->w_name, fixup_filename(lock1->li_file), 1314 lock1->li_line); 1315 printf(" 2nd %p %s (%s) @ %s:%d\n", lock, 1316 lock->lo_name, w->w_name, 1317 fixup_filename(file), line); 1318 } else { 1319 printf(" 1st %p %s (%s) @ %s:%d\n", 1320 lock2->li_lock, lock2->li_lock->lo_name, 1321 lock2->li_lock->lo_witness->w_name, 1322 fixup_filename(lock2->li_file), 1323 lock2->li_line); 1324 printf(" 2nd %p %s (%s) @ %s:%d\n", 1325 lock1->li_lock, lock1->li_lock->lo_name, 1326 w1->w_name, fixup_filename(lock1->li_file), 1327 lock1->li_line); 1328 printf(" 3rd %p %s (%s) @ %s:%d\n", lock, 1329 lock->lo_name, w->w_name, 1330 fixup_filename(file), line); 1331 } 1332 witness_debugger(1); 1333 return; 1334 } 1335 } 1336 1337 /* 1338 * If requested, build a new lock order. However, don't build a new 1339 * relationship between a sleepable lock and Giant if it is in the 1340 * wrong direction. The correct lock order is that sleepable locks 1341 * always come before Giant. 1342 */ 1343 if (flags & LOP_NEWORDER && 1344 !(plock->li_lock == &Giant.lock_object && 1345 (lock->lo_flags & LO_SLEEPABLE) != 0)) { 1346 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__, 1347 w->w_name, plock->li_lock->lo_witness->w_name); 1348 itismychild(plock->li_lock->lo_witness, w); 1349 } 1350out: 1351 mtx_unlock_spin(&w_mtx); 1352} 1353 1354void 1355witness_lock(struct lock_object *lock, int flags, const char *file, int line) 1356{ 1357 struct lock_list_entry **lock_list, *lle; 1358 struct lock_instance *instance; 1359 struct witness *w; 1360 struct thread *td; 1361 1362 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL || 1363 panicstr != NULL) 1364 return; 1365 w = lock->lo_witness; 1366 td = curthread; 1367 1368 /* Determine lock list for this lock. 
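	 * Sleep locks are tracked on the per-thread td_sleeplocks list,
	 * while spin locks are tracked on the per-CPU spinlocks list.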
*/ 1369 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK) 1370 lock_list = &td->td_sleeplocks; 1371 else 1372 lock_list = PCPU_PTR(spinlocks); 1373 1374 /* Check to see if we are recursing on a lock we already own. */ 1375 instance = find_instance(*lock_list, lock); 1376 if (instance != NULL) { 1377 instance->li_flags++; 1378 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__, 1379 td->td_proc->p_pid, lock->lo_name, 1380 instance->li_flags & LI_RECURSEMASK); 1381 instance->li_file = file; 1382 instance->li_line = line; 1383 return; 1384 } 1385 1386 /* Update per-witness last file and line acquire. */ 1387 w->w_file = file; 1388 w->w_line = line; 1389 1390 /* Find the next open lock instance in the list and fill it. */ 1391 lle = *lock_list; 1392 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) { 1393 lle = witness_lock_list_get(); 1394 if (lle == NULL) 1395 return; 1396 lle->ll_next = *lock_list; 1397 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__, 1398 td->td_proc->p_pid, lle); 1399 *lock_list = lle; 1400 } 1401 instance = &lle->ll_children[lle->ll_count++]; 1402 instance->li_lock = lock; 1403 instance->li_line = line; 1404 instance->li_file = file; 1405 if ((flags & LOP_EXCLUSIVE) != 0) 1406 instance->li_flags = LI_EXCLUSIVE; 1407 else 1408 instance->li_flags = 0; 1409 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__, 1410 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1); 1411} 1412 1413void 1414witness_upgrade(struct lock_object *lock, int flags, const char *file, int line) 1415{ 1416 struct lock_instance *instance; 1417 struct lock_class *class; 1418 1419 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 1420 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 1421 return; 1422 class = LOCK_CLASS(lock); 1423 if (witness_watch) { 1424 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1425 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d", 1426 class->lc_name, lock->lo_name, 1427 fixup_filename(file), line); 1428 if ((class->lc_flags & LC_SLEEPLOCK) == 0) 1429 panic("upgrade of non-sleep lock (%s) %s @ %s:%d", 1430 class->lc_name, lock->lo_name, 1431 fixup_filename(file), line); 1432 } 1433 instance = find_instance(curthread->td_sleeplocks, lock); 1434 if (instance == NULL) 1435 panic("upgrade of unlocked lock (%s) %s @ %s:%d", 1436 class->lc_name, lock->lo_name, 1437 fixup_filename(file), line); 1438 if (witness_watch) { 1439 if ((instance->li_flags & LI_EXCLUSIVE) != 0) 1440 panic("upgrade of exclusive lock (%s) %s @ %s:%d", 1441 class->lc_name, lock->lo_name, 1442 fixup_filename(file), line); 1443 if ((instance->li_flags & LI_RECURSEMASK) != 0) 1444 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d", 1445 class->lc_name, lock->lo_name, 1446 instance->li_flags & LI_RECURSEMASK, 1447 fixup_filename(file), line); 1448 } 1449 instance->li_flags |= LI_EXCLUSIVE; 1450} 1451 1452void 1453witness_downgrade(struct lock_object *lock, int flags, const char *file, 1454 int line) 1455{ 1456 struct lock_instance *instance; 1457 struct lock_class *class; 1458 1459 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 1460 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 1461 return; 1462 class = LOCK_CLASS(lock); 1463 if (witness_watch) { 1464 if ((lock->lo_flags & LO_UPGRADABLE) == 0) 1465 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d", 1466 class->lc_name, lock->lo_name, 1467 fixup_filename(file), line); 1468 if ((class->lc_flags & LC_SLEEPLOCK) == 0) 1469 panic("downgrade of non-sleep lock 
(%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK, file, line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled via witness_watch, we may end up here
	 * with locks that were registered earlier still sitting in the
	 * td_sleeplocks queue.  Those queues have to be flushed, so simply
	 * return (instead of panicking) when such a leftover lock is
	 * encountered.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name,
		    fixup_filename(file), line);
		printf("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * In order to reduce contention on w_mtx, we want to always keep a
	 * head object in the lists so that frequent allocation from the
	 * free witness pool (and the subsequent locking) is avoided.
	 * To keep the code simple, an empty head object also means that
	 * there are no further objects in the list, so list ownership has
	 * to be handed over to another object if the current head needs
	 * to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
					printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i], printf);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all the checks on spin lock ownership have passed, the
	 * thread is on a safe path and can be unpinned.
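	 * (Holding at least one spin lock already implies a critical
	 * section, so the thread cannot migrate while the per-CPU list
	 * is examined.)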
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spin lock and, since the flags
		 * cannot match for this lock's class, check whether the
		 * only spin lock held is the one curthread is expected
		 * to hold.
		 */
		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
		    lock1->li_lock == lock && n == 0)
			return (0);

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf(" with the following");
		if (flags & WARN_SLEEPOK)
			printf(" non-sleepable");
		printf(" locks held:\n");
		n += witness_list_locks(&lock_list, printf);
	} else
		sched_unpin();
	if (flags & WARN_PANIC && n)
		panic("%s", __func__);
	else
		witness_debugger(n);
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;
	struct witness_list *typelist;

	MPASS(description != NULL);

	if (witness_watch == -1 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK)) {
		if (witness_skipspin)
			return (NULL);
		else
			typelist = &w_spin;
	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
		typelist = &w_sleep;
	else
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);

	mtx_lock_spin(&w_mtx);
	w = witness_hash_get(description);
	if (w)
		goto found;
	if ((w = witness_get()) == NULL)
		return (NULL);
	MPASS(strlen(description) < MAX_W_NAME);
	strcpy(w->w_name, description);
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	}

	/* Insert new witness into the hash */
	witness_hash_put(w);
	witness_increment_graph_generation();
	mtx_unlock_spin(&w_mtx);
	return (w);
found:
	w->w_refcount++;
	mtx_unlock_spin(&w_mtx);
	if (lock_class != w->w_class)
		panic(
		    "lock (%s) %s does not match earlier (%s) lock",
		    description, lock_class->lc_name,
		    w->w_class->lc_name);
	return (w);
}

static void
depart(struct witness *w)
{
	struct witness_list *list;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * Set file to NULL as it may point into a loadable module.
1798 */ 1799 w->w_file = NULL; 1800 w->w_line = 0; 1801 witness_increment_graph_generation(); 1802} 1803 1804 1805static void 1806adopt(struct witness *parent, struct witness *child) 1807{ 1808 int pi, ci, i, j; 1809 1810 if (witness_cold == 0) 1811 mtx_assert(&w_mtx, MA_OWNED); 1812 1813 /* If the relationship is already known, there's no work to be done. */ 1814 if (isitmychild(parent, child)) 1815 return; 1816 1817 /* When the structure of the graph changes, bump up the generation. */ 1818 witness_increment_graph_generation(); 1819 1820 /* 1821 * The hard part ... create the direct relationship, then propagate all 1822 * indirect relationships. 1823 */ 1824 pi = parent->w_index; 1825 ci = child->w_index; 1826 WITNESS_INDEX_ASSERT(pi); 1827 WITNESS_INDEX_ASSERT(ci); 1828 MPASS(pi != ci); 1829 w_rmatrix[pi][ci] |= WITNESS_PARENT; 1830 w_rmatrix[ci][pi] |= WITNESS_CHILD; 1831 1832 /* 1833 * If parent was not already an ancestor of child, 1834 * then we increment the descendant and ancestor counters. 1835 */ 1836 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) { 1837 parent->w_num_descendants++; 1838 child->w_num_ancestors++; 1839 } 1840 1841 /* 1842 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as 1843 * an ancestor of 'pi' during this loop. 1844 */ 1845 for (i = 1; i <= w_max_used_index; i++) { 1846 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 && 1847 (i != pi)) 1848 continue; 1849 1850 /* Find each descendant of 'i' and mark it as a descendant. */ 1851 for (j = 1; j <= w_max_used_index; j++) { 1852 1853 /* 1854 * Skip children that are already marked as 1855 * descendants of 'i'. 1856 */ 1857 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) 1858 continue; 1859 1860 /* 1861 * We are only interested in descendants of 'ci'. Note 1862 * that 'ci' itself is counted as a descendant of 'ci'. 1863 */ 1864 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 && 1865 (j != ci)) 1866 continue; 1867 w_rmatrix[i][j] |= WITNESS_ANCESTOR; 1868 w_rmatrix[j][i] |= WITNESS_DESCENDANT; 1869 w_data[i].w_num_descendants++; 1870 w_data[j].w_num_ancestors++; 1871 1872 /* 1873 * Make sure we aren't marking a node as both an 1874 * ancestor and descendant. We should have caught 1875 * this as a lock order reversal earlier. 1876 */ 1877 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) && 1878 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) { 1879 printf("witness rmatrix paradox! [%d][%d]=%d " 1880 "both ancestor and descendant\n", 1881 i, j, w_rmatrix[i][j]); 1882 kdb_backtrace(); 1883 printf("Witness disabled.\n"); 1884 witness_watch = -1; 1885 } 1886 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) && 1887 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) { 1888 printf("witness rmatrix paradox! [%d][%d]=%d " 1889 "both ancestor and descendant\n", 1890 j, i, w_rmatrix[j][i]); 1891 kdb_backtrace(); 1892 printf("Witness disabled.\n"); 1893 witness_watch = -1; 1894 } 1895 } 1896 } 1897} 1898 1899static void 1900itismychild(struct witness *parent, struct witness *child) 1901{ 1902 1903 MPASS(child != NULL && parent != NULL); 1904 if (witness_cold == 0) 1905 mtx_assert(&w_mtx, MA_OWNED); 1906 1907 if (!witness_lock_type_equal(parent, child)) { 1908 if (witness_cold == 0) 1909 mtx_unlock_spin(&w_mtx); 1910 panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not " 1911 "the same lock type", __func__, parent->w_name, 1912 parent->w_class->lc_name, child->w_name, 1913 child->w_class->lc_name); 1914 } 1915 adopt(parent, child); 1916} 1917 1918/* 1919 * Generic code for the isitmy*() functions. 
The rmask parameter is the 1920 * expected relationship of w1 to w2. 1921 */ 1922static int 1923_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname) 1924{ 1925 unsigned char r1, r2; 1926 int i1, i2; 1927 1928 i1 = w1->w_index; 1929 i2 = w2->w_index; 1930 WITNESS_INDEX_ASSERT(i1); 1931 WITNESS_INDEX_ASSERT(i2); 1932 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK; 1933 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK; 1934 1935 /* The flags on one better be the inverse of the flags on the other */ 1936 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) || 1937 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) { 1938 printf("%s: rmatrix mismatch between %s (index %d) and %s " 1939 "(index %d): w_rmatrix[%d][%d] == %hhx but " 1940 "w_rmatrix[%d][%d] == %hhx\n", 1941 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1, 1942 i2, i1, r2); 1943 kdb_backtrace(); 1944 printf("Witness disabled.\n"); 1945 witness_watch = -1; 1946 } 1947 return (r1 & rmask); 1948} 1949 1950/* 1951 * Checks if @child is a direct child of @parent. 1952 */ 1953static int 1954isitmychild(struct witness *parent, struct witness *child) 1955{ 1956 1957 return (_isitmyx(parent, child, WITNESS_PARENT, __func__)); 1958} 1959 1960/* 1961 * Checks if @descendant is a direct or indirect descendant of @ancestor. 1962 */ 1963static int 1964isitmydescendant(struct witness *ancestor, struct witness *descendant) 1965{ 1966 1967 return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK, 1968 __func__)); 1969} 1970 1971#ifdef BLESSING 1972static int 1973blessed(struct witness *w1, struct witness *w2) 1974{ 1975 int i; 1976 struct witness_blessed *b; 1977 1978 for (i = 0; i < blessed_count; i++) { 1979 b = &blessed_list[i]; 1980 if (strcmp(w1->w_name, b->b_lock1) == 0) { 1981 if (strcmp(w2->w_name, b->b_lock2) == 0) 1982 return (1); 1983 continue; 1984 } 1985 if (strcmp(w1->w_name, b->b_lock2) == 0) 1986 if (strcmp(w2->w_name, b->b_lock1) == 0) 1987 return (1); 1988 } 1989 return (0); 1990} 1991#endif 1992 1993static struct witness * 1994witness_get(void) 1995{ 1996 struct witness *w; 1997 int index; 1998 1999 if (witness_cold == 0) 2000 mtx_assert(&w_mtx, MA_OWNED); 2001 2002 if (witness_watch == -1) { 2003 mtx_unlock_spin(&w_mtx); 2004 return (NULL); 2005 } 2006 if (STAILQ_EMPTY(&w_free)) { 2007 witness_watch = -1; 2008 mtx_unlock_spin(&w_mtx); 2009 printf("WITNESS: unable to allocate a new witness object\n"); 2010 return (NULL); 2011 } 2012 w = STAILQ_FIRST(&w_free); 2013 STAILQ_REMOVE_HEAD(&w_free, w_list); 2014 w_free_cnt--; 2015 index = w->w_index; 2016 MPASS(index > 0 && index == w_max_used_index+1 && 2017 index < WITNESS_COUNT); 2018 bzero(w, sizeof(*w)); 2019 w->w_index = index; 2020 if (index > w_max_used_index) 2021 w_max_used_index = index; 2022 return (w); 2023} 2024 2025static void 2026witness_free(struct witness *w) 2027{ 2028 2029 STAILQ_INSERT_HEAD(&w_free, w, w_list); 2030 w_free_cnt++; 2031} 2032 2033static struct lock_list_entry * 2034witness_lock_list_get(void) 2035{ 2036 struct lock_list_entry *lle; 2037 2038 if (witness_watch == -1) 2039 return (NULL); 2040 mtx_lock_spin(&w_mtx); 2041 lle = w_lock_list_free; 2042 if (lle == NULL) { 2043 witness_watch = -1; 2044 mtx_unlock_spin(&w_mtx); 2045 printf("%s: witness exhausted\n", __func__); 2046 return (NULL); 2047 } 2048 w_lock_list_free = lle->ll_next; 2049 mtx_unlock_spin(&w_mtx); 2050 bzero(lle, sizeof(*lle)); 2051 return (lle); 2052} 2053 2054static void 2055witness_lock_list_free(struct lock_list_entry *lle) 2056{ 2057 2058
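 /* Put the entry back at the head of the global free list. */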
mtx_lock_spin(&w_mtx); 2059 lle->ll_next = w_lock_list_free; 2060 w_lock_list_free = lle; 2061 mtx_unlock_spin(&w_mtx); 2062} 2063 2064static struct lock_instance * 2065find_instance(struct lock_list_entry *list, struct lock_object *lock) 2066{ 2067 struct lock_list_entry *lle; 2068 struct lock_instance *instance; 2069 int i; 2070 2071 for (lle = list; lle != NULL; lle = lle->ll_next) 2072 for (i = lle->ll_count - 1; i >= 0; i--) { 2073 instance = &lle->ll_children[i]; 2074 if (instance->li_lock == lock) 2075 return (instance); 2076 } 2077 return (NULL); 2078} 2079 2080static void 2081witness_list_lock(struct lock_instance *instance, 2082 int (*prnt)(const char *fmt, ...)) 2083{ 2084 struct lock_object *lock; 2085 2086 lock = instance->li_lock; 2087 prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ? 2088 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name); 2089 if (lock->lo_witness->w_name != lock->lo_name) 2090 prnt(" (%s)", lock->lo_witness->w_name); 2091 prnt(" r = %d (%p) locked @ %s:%d\n", 2092 instance->li_flags & LI_RECURSEMASK, lock, 2093 fixup_filename(instance->li_file), 2094 instance->li_line); 2095} 2096 2097#ifdef DDB 2098static int 2099witness_thread_has_locks(struct thread *td) 2100{ 2101 2102 if (td->td_sleeplocks == NULL) 2103 return (0); 2104 return (td->td_sleeplocks->ll_count != 0); 2105} 2106 2107static int 2108witness_proc_has_locks(struct proc *p) 2109{ 2110 struct thread *td; 2111 2112 FOREACH_THREAD_IN_PROC(p, td) { 2113 if (witness_thread_has_locks(td)) 2114 return (1); 2115 } 2116 return (0); 2117} 2118#endif 2119 2120int 2121witness_list_locks(struct lock_list_entry **lock_list, 2122 int (*prnt)(const char *fmt, ...)) 2123{ 2124 struct lock_list_entry *lle; 2125 int i, nheld; 2126 2127 nheld = 0; 2128 for (lle = *lock_list; lle != NULL; lle = lle->ll_next) 2129 for (i = lle->ll_count - 1; i >= 0; i--) { 2130 witness_list_lock(&lle->ll_children[i], prnt); 2131 nheld++; 2132 } 2133 return (nheld); 2134} 2135 2136/* 2137 * This is a bit risky at best. We call this function when we have timed 2138 * out acquiring a spin lock, and we assume that the other CPU is stuck 2139 * with this lock held. So, we go groveling around in the other CPU's 2140 * per-cpu data to try to find the lock instance for this spin lock to 2141 * see when it was last acquired. 
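 * If the owner is not currently running on a CPU, or no matching instance is found in its per-cpu list, nothing is printed.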
2142 */ 2143void 2144witness_display_spinlock(struct lock_object *lock, struct thread *owner, 2145 int (*prnt)(const char *fmt, ...)) 2146{ 2147 struct lock_instance *instance; 2148 struct pcpu *pc; 2149 2150 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU) 2151 return; 2152 pc = pcpu_find(owner->td_oncpu); 2153 instance = find_instance(pc->pc_spinlocks, lock); 2154 if (instance != NULL) 2155 witness_list_lock(instance, prnt); 2156} 2157 2158void 2159witness_save(struct lock_object *lock, const char **filep, int *linep) 2160{ 2161 struct lock_list_entry *lock_list; 2162 struct lock_instance *instance; 2163 struct lock_class *class; 2164 2165 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2166 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2167 return; 2168 class = LOCK_CLASS(lock); 2169 if (class->lc_flags & LC_SLEEPLOCK) 2170 lock_list = curthread->td_sleeplocks; 2171 else { 2172 if (witness_skipspin) 2173 return; 2174 lock_list = PCPU_GET(spinlocks); 2175 } 2176 instance = find_instance(lock_list, lock); 2177 if (instance == NULL) 2178 panic("%s: lock (%s) %s not locked", __func__, 2179 class->lc_name, lock->lo_name); 2180 *filep = instance->li_file; 2181 *linep = instance->li_line; 2182} 2183 2184void 2185witness_restore(struct lock_object *lock, const char *file, int line) 2186{ 2187 struct lock_list_entry *lock_list; 2188 struct lock_instance *instance; 2189 struct lock_class *class; 2190 2191 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2192 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2193 return; 2194 class = LOCK_CLASS(lock); 2195 if (class->lc_flags & LC_SLEEPLOCK) 2196 lock_list = curthread->td_sleeplocks; 2197 else { 2198 if (witness_skipspin) 2199 return; 2200 lock_list = PCPU_GET(spinlocks); 2201 } 2202 instance = find_instance(lock_list, lock); 2203 if (instance == NULL) 2204 panic("%s: lock (%s) %s not locked", __func__, 2205 class->lc_name, lock->lo_name); 2206 lock->lo_witness->w_file = file; 2207 lock->lo_witness->w_line = line; 2208 instance->li_file = file; 2209 instance->li_line = line; 2210} 2211 2212void 2213witness_assert(struct lock_object *lock, int flags, const char *file, int line) 2214{ 2215#ifdef INVARIANT_SUPPORT 2216 struct lock_instance *instance; 2217 struct lock_class *class; 2218 2219 if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL) 2220 return; 2221 class = LOCK_CLASS(lock); 2222 if ((class->lc_flags & LC_SLEEPLOCK) != 0) 2223 instance = find_instance(curthread->td_sleeplocks, lock); 2224 else if ((class->lc_flags & LC_SPINLOCK) != 0) 2225 instance = find_instance(PCPU_GET(spinlocks), lock); 2226 else { 2227 panic("Lock (%s) %s is not sleep or spin!", 2228 class->lc_name, lock->lo_name); 2229 } 2230 switch (flags) { 2231 case LA_UNLOCKED: 2232 if (instance != NULL) 2233 panic("Lock (%s) %s locked @ %s:%d.", 2234 class->lc_name, lock->lo_name, 2235 fixup_filename(file), line); 2236 break; 2237 case LA_LOCKED: 2238 case LA_LOCKED | LA_RECURSED: 2239 case LA_LOCKED | LA_NOTRECURSED: 2240 case LA_SLOCKED: 2241 case LA_SLOCKED | LA_RECURSED: 2242 case LA_SLOCKED | LA_NOTRECURSED: 2243 case LA_XLOCKED: 2244 case LA_XLOCKED | LA_RECURSED: 2245 case LA_XLOCKED | LA_NOTRECURSED: 2246 if (instance == NULL) { 2247 panic("Lock (%s) %s not locked @ %s:%d.", 2248 class->lc_name, lock->lo_name, 2249 fixup_filename(file), line); 2250 break; 2251 } 2252 if ((flags & LA_XLOCKED) != 0 && 2253 (instance->li_flags & LI_EXCLUSIVE) == 0) 2254 panic("Lock (%s) %s 
not exclusively locked @ %s:%d.", 2255 class->lc_name, lock->lo_name, 2256 fixup_filename(file), line); 2257 if ((flags & LA_SLOCKED) != 0 && 2258 (instance->li_flags & LI_EXCLUSIVE) != 0) 2259 panic("Lock (%s) %s exclusively locked @ %s:%d.", 2260 class->lc_name, lock->lo_name, 2261 fixup_filename(file), line); 2262 if ((flags & LA_RECURSED) != 0 && 2263 (instance->li_flags & LI_RECURSEMASK) == 0) 2264 panic("Lock (%s) %s not recursed @ %s:%d.", 2265 class->lc_name, lock->lo_name, 2266 fixup_filename(file), line); 2267 if ((flags & LA_NOTRECURSED) != 0 && 2268 (instance->li_flags & LI_RECURSEMASK) != 0) 2269 panic("Lock (%s) %s recursed @ %s:%d.", 2270 class->lc_name, lock->lo_name, 2271 fixup_filename(file), line); 2272 break; 2273 default: 2274 panic("Invalid lock assertion at %s:%d.", 2275 fixup_filename(file), line); 2276 2277 } 2278#endif /* INVARIANT_SUPPORT */ 2279} 2280 2281static void 2282witness_setflag(struct lock_object *lock, int flag, int set) 2283{ 2284 struct lock_list_entry *lock_list; 2285 struct lock_instance *instance; 2286 struct lock_class *class; 2287 2288 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL) 2289 return; 2290 class = LOCK_CLASS(lock); 2291 if (class->lc_flags & LC_SLEEPLOCK) 2292 lock_list = curthread->td_sleeplocks; 2293 else { 2294 if (witness_skipspin) 2295 return; 2296 lock_list = PCPU_GET(spinlocks); 2297 } 2298 instance = find_instance(lock_list, lock); 2299 if (instance == NULL) 2300 panic("%s: lock (%s) %s not locked", __func__, 2301 class->lc_name, lock->lo_name); 2302 2303 if (set) 2304 instance->li_flags |= flag; 2305 else 2306 instance->li_flags &= ~flag; 2307} 2308 2309void 2310witness_norelease(struct lock_object *lock) 2311{ 2312 2313 witness_setflag(lock, LI_NORELEASE, 1); 2314} 2315 2316void 2317witness_releaseok(struct lock_object *lock) 2318{ 2319 2320 witness_setflag(lock, LI_NORELEASE, 0); 2321} 2322 2323#ifdef DDB 2324static void 2325witness_ddb_list(struct thread *td) 2326{ 2327 2328 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__)); 2329 KASSERT(kdb_active, ("%s: not in the debugger", __func__)); 2330 2331 if (witness_watch < 1) 2332 return; 2333 2334 witness_list_locks(&td->td_sleeplocks, db_printf); 2335 2336 /* 2337 * We only handle spinlocks if td == curthread. This is somewhat broken 2338 * if td is currently executing on some other CPU and holds spin locks 2339 * as we won't display those locks. If we had a MI way of getting 2340 * the per-cpu data for a given cpu then we could use 2341 * td->td_oncpu to get the list of spinlocks for this thread 2342 * and "fix" this. 2343 * 2344 * That still wouldn't really fix this unless we locked the scheduler 2345 * lock or stopped the other CPU to make sure it wasn't changing the 2346 * list out from under us. It is probably best to just not try to 2347 * handle threads on other CPU's for now. 2348 */ 2349 if (td == curthread && PCPU_GET(spinlocks) != NULL) 2350 witness_list_locks(PCPU_PTR(spinlocks), db_printf); 2351} 2352 2353DB_SHOW_COMMAND(locks, db_witness_list) 2354{ 2355 struct thread *td; 2356 2357 if (have_addr) 2358 td = db_lookup_thread(addr, TRUE); 2359 else 2360 td = kdb_thread; 2361 witness_ddb_list(td); 2362} 2363 2364DB_SHOW_ALL_COMMAND(locks, db_witness_list_all) 2365{ 2366 struct thread *td; 2367 struct proc *p; 2368 2369 /* 2370 * It would be nice to list only threads and processes that actually 2371 * held sleep locks, but that information is currently not exported 2372 * by WITNESS. 
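 * Instead, every process and thread is walked and the ones that witness_proc_has_locks() and witness_thread_has_locks() report as holding nothing are skipped.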
2373 */ 2374 FOREACH_PROC_IN_SYSTEM(p) { 2375 if (!witness_proc_has_locks(p)) 2376 continue; 2377 FOREACH_THREAD_IN_PROC(p, td) { 2378 if (!witness_thread_has_locks(td)) 2379 continue; 2380 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid, 2381 p->p_comm, td, td->td_tid); 2382 witness_ddb_list(td); 2383 } 2384 } 2385} 2386DB_SHOW_ALIAS(alllocks, db_witness_list_all) 2387 2388DB_SHOW_COMMAND(witness, db_witness_display) 2389{ 2390 2391 witness_ddb_display(db_printf); 2392} 2393#endif 2394 2395static int 2396sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS) 2397{ 2398 struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2; 2399 struct witness *tmp_w1, *tmp_w2, *w1, *w2; 2400 struct sbuf *sb; 2401 u_int w_rmatrix1, w_rmatrix2; 2402 int error, generation, i, j; 2403 2404 tmp_data1 = NULL; 2405 tmp_data2 = NULL; 2406 tmp_w1 = NULL; 2407 tmp_w2 = NULL; 2408 if (witness_watch < 1) { 2409 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning)); 2410 return (error); 2411 } 2412 if (witness_cold) { 2413 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold)); 2414 return (error); 2415 } 2416 error = 0; 2417 sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND); 2418 if (sb == NULL) 2419 return (ENOMEM); 2420 2421 /* Allocate and init temporary storage space. */ 2422 tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO); 2423 tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO); 2424 tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP, 2425 M_WAITOK | M_ZERO); 2426 tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP, 2427 M_WAITOK | M_ZERO); 2428 stack_zero(&tmp_data1->wlod_stack); 2429 stack_zero(&tmp_data2->wlod_stack); 2430 2431restart: 2432 mtx_lock_spin(&w_mtx); 2433 generation = w_generation; 2434 mtx_unlock_spin(&w_mtx); 2435 sbuf_printf(sb, "Number of known direct relationships is %d\n", 2436 w_lohash.wloh_count); 2437 for (i = 1; i < w_max_used_index; i++) { 2438 mtx_lock_spin(&w_mtx); 2439 if (generation != w_generation) { 2440 mtx_unlock_spin(&w_mtx); 2441 2442 /* The graph has changed, try again. */ 2443 req->oldidx = 0; 2444 sbuf_clear(sb); 2445 goto restart; 2446 } 2447 2448 w1 = &w_data[i]; 2449 if (w1->w_reversed == 0) { 2450 mtx_unlock_spin(&w_mtx); 2451 continue; 2452 } 2453 2454 /* Copy w1 locally so we can release the spin lock. */ 2455 *tmp_w1 = *w1; 2456 mtx_unlock_spin(&w_mtx); 2457 2458 if (tmp_w1->w_reversed == 0) 2459 continue; 2460 for (j = 1; j < w_max_used_index; j++) { 2461 if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j) 2462 continue; 2463 2464 mtx_lock_spin(&w_mtx); 2465 if (generation != w_generation) { 2466 mtx_unlock_spin(&w_mtx); 2467 2468 /* The graph has changed, try again. */ 2469 req->oldidx = 0; 2470 sbuf_clear(sb); 2471 goto restart; 2472 } 2473 2474 w2 = &w_data[j]; 2475 data1 = witness_lock_order_get(w1, w2); 2476 data2 = witness_lock_order_get(w2, w1); 2477 2478 /* 2479 * Copy information locally so we can release the 2480 * spin lock. 
2481 */ 2482 *tmp_w2 = *w2; 2483 w_rmatrix1 = (unsigned int)w_rmatrix[i][j]; 2484 w_rmatrix2 = (unsigned int)w_rmatrix[j][i]; 2485 2486 if (data1) { 2487 stack_zero(&tmp_data1->wlod_stack); 2488 stack_copy(&data1->wlod_stack, 2489 &tmp_data1->wlod_stack); 2490 } 2491 if (data2 && data2 != data1) { 2492 stack_zero(&tmp_data2->wlod_stack); 2493 stack_copy(&data2->wlod_stack, 2494 &tmp_data2->wlod_stack); 2495 } 2496 mtx_unlock_spin(&w_mtx); 2497 2498 sbuf_printf(sb, 2499 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n", 2500 tmp_w1->w_name, tmp_w1->w_class->lc_name, 2501 tmp_w2->w_name, tmp_w2->w_class->lc_name); 2502#if 0 2503 sbuf_printf(sb, 2504 "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n", 2505 tmp_w1->name, tmp_w2->w_name, w_rmatrix1, 2506 tmp_w2->name, tmp_w1->w_name, w_rmatrix2); 2507#endif 2508 if (data1) { 2509 sbuf_printf(sb, 2510 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n", 2511 tmp_w1->w_name, tmp_w1->w_class->lc_name, 2512 tmp_w2->w_name, tmp_w2->w_class->lc_name); 2513 stack_sbuf_print(sb, &tmp_data1->wlod_stack); 2514 sbuf_printf(sb, "\n"); 2515 } 2516 if (data2 && data2 != data1) { 2517 sbuf_printf(sb, 2518 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n", 2519 tmp_w2->w_name, tmp_w2->w_class->lc_name, 2520 tmp_w1->w_name, tmp_w1->w_class->lc_name); 2521 stack_sbuf_print(sb, &tmp_data2->wlod_stack); 2522 sbuf_printf(sb, "\n"); 2523 } 2524 } 2525 } 2526 mtx_lock_spin(&w_mtx); 2527 if (generation != w_generation) { 2528 mtx_unlock_spin(&w_mtx); 2529 2530 /* 2531 * The graph changed while we were printing stack data, 2532 * try again. 2533 */ 2534 req->oldidx = 0; 2535 sbuf_clear(sb); 2536 goto restart; 2537 } 2538 mtx_unlock_spin(&w_mtx); 2539 2540 /* Free temporary storage space. */ 2541 free(tmp_data1, M_TEMP); 2542 free(tmp_data2, M_TEMP); 2543 free(tmp_w1, M_TEMP); 2544 free(tmp_w2, M_TEMP); 2545 2546 sbuf_finish(sb); 2547 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); 2548 sbuf_delete(sb); 2549 2550 return (error); 2551} 2552 2553static int 2554sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS) 2555{ 2556 struct witness *w; 2557 struct sbuf *sb; 2558 int error; 2559 2560 if (witness_watch < 1) { 2561 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning)); 2562 return (error); 2563 } 2564 if (witness_cold) { 2565 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold)); 2566 return (error); 2567 } 2568 error = 0; 2569 2570 error = sysctl_wire_old_buffer(req, 0); 2571 if (error != 0) 2572 return (error); 2573 sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req); 2574 if (sb == NULL) 2575 return (ENOMEM); 2576 sbuf_printf(sb, "\n"); 2577 2578 mtx_lock_spin(&w_mtx); 2579 STAILQ_FOREACH(w, &w_all, w_list) 2580 w->w_displayed = 0; 2581 STAILQ_FOREACH(w, &w_all, w_list) 2582 witness_add_fullgraph(sb, w); 2583 mtx_unlock_spin(&w_mtx); 2584 2585 /* 2586 * Close the sbuf and return to userland. 
2587 */ 2588 error = sbuf_finish(sb); 2589 sbuf_delete(sb); 2590 2591 return (error); 2592} 2593 2594static int 2595sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS) 2596{ 2597 int error, value; 2598 2599 value = witness_watch; 2600 error = sysctl_handle_int(oidp, &value, 0, req); 2601 if (error != 0 || req->newptr == NULL) 2602 return (error); 2603 if (value > 1 || value < -1 || 2604 (witness_watch == -1 && value != witness_watch)) 2605 return (EINVAL); 2606 witness_watch = value; 2607 return (0); 2608} 2609 2610static void 2611witness_add_fullgraph(struct sbuf *sb, struct witness *w) 2612{ 2613 int i; 2614 2615 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0)) 2616 return; 2617 w->w_displayed = 1; 2618 2619 WITNESS_INDEX_ASSERT(w->w_index); 2620 for (i = 1; i <= w_max_used_index; i++) { 2621 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) { 2622 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name, 2623 w_data[i].w_name); 2624 witness_add_fullgraph(sb, &w_data[i]); 2625 } 2626 } 2627} 2628 2629/* 2630 * A simple hash function. Takes a key pointer and a key size. If size == 0, 2631 * interprets the key as a string and reads until the null 2632 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit 2633 * hash value computed from the key. 2634 */ 2635static uint32_t 2636witness_hash_djb2(const uint8_t *key, uint32_t size) 2637{ 2638 unsigned int hash = 5381; 2639 int i; 2640 2641 /* hash = hash * 33 + key[i] */ 2642 if (size) 2643 for (i = 0; i < size; i++) 2644 hash = ((hash << 5) + hash) + (unsigned int)key[i]; 2645 else 2646 for (i = 0; key[i] != 0; i++) 2647 hash = ((hash << 5) + hash) + (unsigned int)key[i]; 2648 2649 return (hash); 2650} 2651 2652 2653/* 2654 * Initializes the two witness hash tables. Called exactly once from 2655 * witness_initialize(). 2656 */ 2657static void 2658witness_init_hash_tables(void) 2659{ 2660 int i; 2661 2662 MPASS(witness_cold); 2663 2664 /* Initialize the hash tables. */ 2665 for (i = 0; i < WITNESS_HASH_SIZE; i++) 2666 w_hash.wh_array[i] = NULL; 2667 2668 w_hash.wh_size = WITNESS_HASH_SIZE; 2669 w_hash.wh_count = 0; 2670 2671 /* Initialize the lock order data hash. 
*/ 2672 w_lofree = NULL; 2673 for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) { 2674 memset(&w_lodata[i], 0, sizeof(w_lodata[i])); 2675 w_lodata[i].wlod_next = w_lofree; 2676 w_lofree = &w_lodata[i]; 2677 } 2678 w_lohash.wloh_size = WITNESS_LO_HASH_SIZE; 2679 w_lohash.wloh_count = 0; 2680 for (i = 0; i < WITNESS_LO_HASH_SIZE; i++) 2681 w_lohash.wloh_array[i] = NULL; 2682} 2683 2684static struct witness * 2685witness_hash_get(const char *key) 2686{ 2687 struct witness *w; 2688 uint32_t hash; 2689 2690 MPASS(key != NULL); 2691 if (witness_cold == 0) 2692 mtx_assert(&w_mtx, MA_OWNED); 2693 hash = witness_hash_djb2(key, 0) % w_hash.wh_size; 2694 w = w_hash.wh_array[hash]; 2695 while (w != NULL) { 2696 if (strcmp(w->w_name, key) == 0) 2697 goto out; 2698 w = w->w_hash_next; 2699 } 2700 2701out: 2702 return (w); 2703} 2704 2705static void 2706witness_hash_put(struct witness *w) 2707{ 2708 uint32_t hash; 2709 2710 MPASS(w != NULL); 2711 MPASS(w->w_name != NULL); 2712 if (witness_cold == 0) 2713 mtx_assert(&w_mtx, MA_OWNED); 2714 KASSERT(witness_hash_get(w->w_name) == NULL, 2715 ("%s: trying to add a hash entry that already exists!", __func__)); 2716 KASSERT(w->w_hash_next == NULL, 2717 ("%s: w->w_hash_next != NULL", __func__)); 2718 2719 hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size; 2720 w->w_hash_next = w_hash.wh_array[hash]; 2721 w_hash.wh_array[hash] = w; 2722 w_hash.wh_count++; 2723} 2724 2725 2726static struct witness_lock_order_data * 2727witness_lock_order_get(struct witness *parent, struct witness *child) 2728{ 2729 struct witness_lock_order_data *data = NULL; 2730 struct witness_lock_order_key key; 2731 unsigned int hash; 2732 2733 MPASS(parent != NULL && child != NULL); 2734 key.from = parent->w_index; 2735 key.to = child->w_index; 2736 WITNESS_INDEX_ASSERT(key.from); 2737 WITNESS_INDEX_ASSERT(key.to); 2738 if ((w_rmatrix[parent->w_index][child->w_index] 2739 & WITNESS_LOCK_ORDER_KNOWN) == 0) 2740 goto out; 2741 2742 hash = witness_hash_djb2((const char*)&key, 2743 sizeof(key)) % w_lohash.wloh_size; 2744 data = w_lohash.wloh_array[hash]; 2745 while (data != NULL) { 2746 if (witness_lock_order_key_equal(&data->wlod_key, &key)) 2747 break; 2748 data = data->wlod_next; 2749 } 2750 2751out: 2752 return (data); 2753} 2754 2755/* 2756 * Verify that parent and child have a known relationship, are not the same, 2757 * and child is actually a child of parent. This is done without w_mtx 2758 * to avoid contention in the common case. 
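 * Returns 1 if the order is already known to be valid, 0 otherwise.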
2759 */ 2760static int 2761witness_lock_order_check(struct witness *parent, struct witness *child) 2762{ 2763 2764 if (parent != child && 2765 w_rmatrix[parent->w_index][child->w_index] 2766 & WITNESS_LOCK_ORDER_KNOWN && 2767 isitmychild(parent, child)) 2768 return (1); 2769 2770 return (0); 2771} 2772 2773static int 2774witness_lock_order_add(struct witness *parent, struct witness *child) 2775{ 2776 struct witness_lock_order_data *data = NULL; 2777 struct witness_lock_order_key key; 2778 unsigned int hash; 2779 2780 MPASS(parent != NULL && child != NULL); 2781 key.from = parent->w_index; 2782 key.to = child->w_index; 2783 WITNESS_INDEX_ASSERT(key.from); 2784 WITNESS_INDEX_ASSERT(key.to); 2785 if (w_rmatrix[parent->w_index][child->w_index] 2786 & WITNESS_LOCK_ORDER_KNOWN) 2787 return (1); 2788 2789 hash = witness_hash_djb2((const char*)&key, 2790 sizeof(key)) % w_lohash.wloh_size; 2791 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN; 2792 data = w_lofree; 2793 if (data == NULL) 2794 return (0); 2795 w_lofree = data->wlod_next; 2796 data->wlod_next = w_lohash.wloh_array[hash]; 2797 data->wlod_key = key; 2798 w_lohash.wloh_array[hash] = data; 2799 w_lohash.wloh_count++; 2800 stack_zero(&data->wlod_stack); 2801 stack_save(&data->wlod_stack); 2802 return (1); 2803} 2804 2805/* Call this whenver the structure of the witness graph changes. */ 2806static void 2807witness_increment_graph_generation(void) 2808{ 2809 2810 if (witness_cold == 0) 2811 mtx_assert(&w_mtx, MA_OWNED); 2812 w_generation++; 2813} 2814 2815#ifdef KDB 2816static void 2817_witness_debugger(int cond, const char *msg) 2818{ 2819 2820 if (witness_trace && cond) 2821 kdb_backtrace(); 2822 if (witness_kdb && cond) 2823 kdb_enter(KDB_WHY_WITNESS, msg); 2824} 2825#endif 2826