/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 239864 2012-08-29 16:56:50Z marius $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#define	WITNESS_COUNT		1024
#define	WITNESS_CHILDCOUNT	(WITNESS_COUNT * 4)
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	768

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048

#define	MAX_W_NAME	64

#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
					  * observed. */
#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)

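/*
 * For example (purely illustrative; this follows directly from the flag
 * values above):
 *
 *	WITNESS_ATOD(WITNESS_PARENT | WITNESS_ANCESTOR) ==
 *	    (WITNESS_CHILD | WITNESS_DESCENDANT)
 *	WITNESS_DTOA(WITNESS_CHILD | WITNESS_DESCENDANT) ==
 *	    (WITNESS_PARENT | WITNESS_ANCESTOR)
 */
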
#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)

static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
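
/*
 * Purely illustrative (not used by the code): with ll_count == 3, the
 * traversal order from most to least recently acquired lock is
 * ll_children[2], ll_children[1], ll_children[0].
 */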

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_empty(const struct witness_lock_order_key *key)
{

	return (key->from == 0 && key->to == 0);
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    const struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
    "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
    0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
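/*
 * A note on reading w_rmatrix, inferred from its use elsewhere in this file
 * (e.g. the WITNESS_PARENT checks in witness_ddb_level_descendants()):
 * w_rmatrix[i][j] holds the relationship flags describing the witness at
 * index i relative to the witness at index j, so a set WITNESS_PARENT bit
 * in w_rmatrix[i][j] means that witness i is a direct parent of witness j.
 */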
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";


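/*
 * A note on how this table is consumed, inferred from the enrollment loop
 * in witness_initialize() below: each { NULL, NULL } entry terminates a
 * group, and within a group every lock is made a child of the lock named
 * on the preceding line, i.e. the locks of a group must be acquired in
 * the order they are listed here.
 */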
static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ "time lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_global_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_rw },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "vm map (system)", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * VM
	 */
	{ "vm map (user)", &lock_class_sx },
	{ "vm object", &lock_class_mtx_sleep },
	{ "vm page", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "pmap pv global", &lock_class_rw },
	{ "pmap", &lock_class_mtx_sleep },
	{ "pmap pv list", &lock_class_rw },
	{ "vm page free queue", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
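
/*
 * For example (purely illustrative): fixup_filename("../../kern/kern_mutex.c")
 * returns a pointer to "kern/kern_mutex.c".
 */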

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code
 * assumes that early boot is single-threaded, at least until after this
 * routine is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
	    M_NOWAIT | M_ZERO);

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* Witness index 0 is intentionally left unused, to aid debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	memset(w_rmatrix, 0,
	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		/* Check for overflow before appending, to avoid writing
		 * past the end of the pending_locks array. */
		if (pending_cnt >= WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
	} else
		lock->lo_witness = enroll(type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	     w->w_name, w->w_class->lc_name,
	     w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (db_pager_quit)
			return;
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
		if (db_pager_quit)
			return;
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);
	if (db_pager_quit)
		return;

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);
	if (db_pager_quit)
		return;

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Pin the thread to avoid problems
		 * with thread migration while checking whether spinlocks
		 * are held.  If at least one spinlock is held, the thread
		 * is on a safe path, so it is safe to unpin it.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock the first time we see it.
			 */
			if (interlock != NULL && interlock == lock1->li_lock) {
				interlock = NULL;
				continue;
			}

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When WITNESS is disabled through witness_watch, registered locks
	 * may still be left in the td_sleeplocks queue.  Make sure those
	 * queues are flushed by searching for any such leftover locks and
	 * removing them.
	 */
	if (witness_watch > 0)
		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
	else
		return;
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * In order to reduce contention on w_mtx, we want to always keep a
	 * head object in the list so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.  To keep the
	 * code simple, an empty head object also implies that there are no
	 * further objects in the list; thus list ownership is handed over to
	 * the next entry whenever the current head needs to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
		printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i], printf);

			}
		panic("Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all the checks of spinlock ownership have passed, the thread
	 * is on a safe path and can be unpinned.
	 */
	sched_pin();
	lock_list = PCPU_GET(spinlocks);
	if (lock_list != NULL && lock_list->ll_count != 0) {
		sched_unpin();

		/*
		 * We should only have one spinlock and, as long as the
		 * flags cannot match for this lock's class, check whether
		 * the first spinlock is the one curthread should hold.
		 */
1694		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1695		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1696		    lock1->li_lock == lock && n == 0)
1697			return (0);
1698
1699		va_start(ap, fmt);
1700		vprintf(fmt, ap);
1701		va_end(ap);
1702		printf(" with the following");
1703		if (flags & WARN_SLEEPOK)
1704			printf(" non-sleepable");
1705		printf(" locks held:\n");
1706		n += witness_list_locks(&lock_list, printf);
1707	} else
1708		sched_unpin();
1709	if (flags & WARN_PANIC && n)
1710		panic("%s", __func__);
1711	else
1712		witness_debugger(n);
1713	return (n);
1714}
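
/*
 * Illustrative sketch (not compiled here): a typical caller warns before
 * voluntarily sleeping, exempting Giant and sleepable locks:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping on \"%s\"", wmesg);
 *
 * where "wmesg" stands in for the caller's wait message.
 */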
1715
1716const char *
1717witness_file(struct lock_object *lock)
1718{
1719	struct witness *w;
1720
1721	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1722		return ("?");
1723	w = lock->lo_witness;
1724	return (w->w_file);
1725}
1726
1727int
1728witness_line(struct lock_object *lock)
1729{
1730	struct witness *w;
1731
1732	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1733		return (0);
1734	w = lock->lo_witness;
1735	return (w->w_line);
1736}
1737
1738static struct witness *
1739enroll(const char *description, struct lock_class *lock_class)
1740{
1741	struct witness *w;
1742	struct witness_list *typelist;
1743
1744	MPASS(description != NULL);
1745
1746	if (witness_watch == -1 || panicstr != NULL)
1747		return (NULL);
1748	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1749		if (witness_skipspin)
1750			return (NULL);
1751		else
1752			typelist = &w_spin;
1753	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1754		typelist = &w_sleep;
1755	else
1756		panic("lock class %s is not sleep or spin",
1757		    lock_class->lc_name);
1758
1759	mtx_lock_spin(&w_mtx);
1760	w = witness_hash_get(description);
1761	if (w)
1762		goto found;
1763	if ((w = witness_get()) == NULL)
1764		return (NULL);
1765	MPASS(strlen(description) < MAX_W_NAME);
1766	strcpy(w->w_name, description);
1767	w->w_class = lock_class;
1768	w->w_refcount = 1;
1769	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1770	if (lock_class->lc_flags & LC_SPINLOCK) {
1771		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1772		w_spin_cnt++;
1773	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1774		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1775		w_sleep_cnt++;
1776	}
1777
1778	/* Insert new witness into the hash */
1779	witness_hash_put(w);
1780	witness_increment_graph_generation();
1781	mtx_unlock_spin(&w_mtx);
1782	return (w);
1783found:
1784	w->w_refcount++;
1785	mtx_unlock_spin(&w_mtx);
1786	if (lock_class != w->w_class)
1787		panic(
1788			"lock (%s) %s does not match earlier (%s) lock",
1789			description, lock_class->lc_name,
1790			w->w_class->lc_name);
1791	return (w);
1792}
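
/*
 * Illustrative sketch (hypothetical names, not compiled here): witnesses
 * are keyed on the description string, so two locks initialized with the
 * same name and class share a single witness:
 *
 *	mtx_init(&a, "mylock", NULL, MTX_DEF);
 *	mtx_init(&b, "mylock", NULL, MTX_DEF);	(w_refcount becomes 2)
 *
 * Reusing a name with a different lock class trips the panic above.
 */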
1793
1794static void
1795depart(struct witness *w)
1796{
1797
1798	MPASS(w->w_refcount == 0);
1799	if (w->w_class->lc_flags & LC_SLEEPLOCK)
1800		w_sleep_cnt--;
1801	else
1802		w_spin_cnt--;
1803	/*
1804	 * Set file to NULL as it may point into a loadable module.
1805	 */
1806	w->w_file = NULL;
1807	w->w_line = 0;
1808	witness_increment_graph_generation();
1809}
1810
1816static void
1817adopt(struct witness *parent, struct witness *child)
1818{
1819	int pi, ci, i, j;
1820
1821	if (witness_cold == 0)
1822		mtx_assert(&w_mtx, MA_OWNED);
1823
1824	/* If the relationship is already known, there's no work to be done. */
1825	if (isitmychild(parent, child))
1826		return;
1827
1828	/* When the structure of the graph changes, bump up the generation. */
1829	witness_increment_graph_generation();
1830
1831	/*
1832	 * The hard part ... create the direct relationship, then propagate all
1833	 * indirect relationships.
1834	 */
1835	pi = parent->w_index;
1836	ci = child->w_index;
1837	WITNESS_INDEX_ASSERT(pi);
1838	WITNESS_INDEX_ASSERT(ci);
1839	MPASS(pi != ci);
1840	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1841	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1842
1843	/*
1844	 * If parent was not already an ancestor of child,
1845	 * then we increment the descendant and ancestor counters.
1846	 */
1847	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1848		parent->w_num_descendants++;
1849		child->w_num_ancestors++;
1850	}
1851
1852	/*
1853	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1854	 * an ancestor of 'pi' during this loop.
1855	 */
1856	for (i = 1; i <= w_max_used_index; i++) {
1857		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1858		    (i != pi))
1859			continue;
1860
1861		/* Find each descendant of 'i' and mark it as a descendant. */
1862		for (j = 1; j <= w_max_used_index; j++) {
1863
1864			/*
1865			 * Skip children that are already marked as
1866			 * descendants of 'i'.
1867			 */
1868			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1869				continue;
1870
1871			/*
1872			 * We are only interested in descendants of 'ci'. Note
1873			 * that 'ci' itself is counted as a descendant of 'ci'.
1874			 */
1875			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1876			    (j != ci))
1877				continue;
1878			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1879			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1880			w_data[i].w_num_descendants++;
1881			w_data[j].w_num_ancestors++;
1882
1883			/*
1884			 * Make sure we aren't marking a node as both an
1885			 * ancestor and descendant. We should have caught
1886			 * this as a lock order reversal earlier.
1887			 */
1888			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1889			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1890				printf("witness rmatrix paradox! [%d][%d]=%d "
1891				    "both ancestor and descendant\n",
1892				    i, j, w_rmatrix[i][j]);
1893				kdb_backtrace();
1894				printf("Witness disabled.\n");
1895				witness_watch = -1;
1896			}
1897			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1898			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1899				printf("witness rmatrix paradox! [%d][%d]=%d "
1900				    "both ancestor and descendant\n",
1901				    j, i, w_rmatrix[j][i]);
1902				kdb_backtrace();
1903				printf("Witness disabled.\n");
1904				witness_watch = -1;
1905			}
1906		}
1907	}
1908}
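
/*
 * Illustrative example (hypothetical names): if "A" is already a parent of
 * "B", then adopt(B, C) not only records B -> C but also marks "A" as an
 * ancestor of "C" (and "C" as a descendant of "A"), keeping w_rmatrix
 * transitively closed.
 */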
1909
1910static void
1911itismychild(struct witness *parent, struct witness *child)
1912{
1913
1914	MPASS(child != NULL && parent != NULL);
1915	if (witness_cold == 0)
1916		mtx_assert(&w_mtx, MA_OWNED);
1917
1918	if (!witness_lock_type_equal(parent, child)) {
1919		if (witness_cold == 0)
1920			mtx_unlock_spin(&w_mtx);
1921		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1922		    "the same lock type", __func__, parent->w_name,
1923		    parent->w_class->lc_name, child->w_name,
1924		    child->w_class->lc_name);
1925	}
1926	adopt(parent, child);
1927}
1928
1929/*
1930 * Generic code for the isitmy*() functions. The rmask parameter is the
1931 * expected relationship of w1 to w2.
1932 */
1933static int
1934_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1935{
1936	unsigned char r1, r2;
1937	int i1, i2;
1938
1939	i1 = w1->w_index;
1940	i2 = w2->w_index;
1941	WITNESS_INDEX_ASSERT(i1);
1942	WITNESS_INDEX_ASSERT(i2);
1943	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1944	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1945
1946	/* The flags on one better be the inverse of the flags on the other */
1947	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1948		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1949		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1950		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1951		    "w_rmatrix[%d][%d] == %hhx\n",
1952		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1953		    i2, i1, r2);
1954		kdb_backtrace();
1955		printf("Witness disabled.\n");
1956		witness_watch = -1;
1957	}
1958	return (r1 & rmask);
1959}
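
/*
 * Illustrative example: the consistency check above requires the two matrix
 * cells to mirror each other, e.g. if w_rmatrix[i1][i2] has WITNESS_PARENT
 * set, then w_rmatrix[i2][i1] must have WITNESS_CHILD set.
 */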
1960
1961/*
1962 * Checks if @child is a direct child of @parent.
1963 */
1964static int
1965isitmychild(struct witness *parent, struct witness *child)
1966{
1967
1968	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1969}
1970
1971/*
1972 * Checks if @descendant is a direct or indirect descendant of @ancestor.
1973 */
1974static int
1975isitmydescendant(struct witness *ancestor, struct witness *descendant)
1976{
1977
1978	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1979	    __func__));
1980}
1981
1982#ifdef BLESSING
1983static int
1984blessed(struct witness *w1, struct witness *w2)
1985{
1986	int i;
1987	struct witness_blessed *b;
1988
1989	for (i = 0; i < blessed_count; i++) {
1990		b = &blessed_list[i];
1991		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1992			if (strcmp(w2->w_name, b->b_lock2) == 0)
1993				return (1);
1994			continue;
1995		}
1996		if (strcmp(w1->w_name, b->b_lock2) == 0)
1997			if (strcmp(w2->w_name, b->b_lock1) == 0)
1998				return (1);
1999	}
2000	return (0);
2001}
2002#endif
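
/*
 * Illustrative sketch (hypothetical names): a pair is blessed by listing it
 * in the blessed_list table consulted above, suppressing lock-order
 * reversal reports between the two names, roughly:
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "lockA", "lockB" },
 *	};
 */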
2003
2004static struct witness *
2005witness_get(void)
2006{
2007	struct witness *w;
2008	int index;
2009
2010	if (witness_cold == 0)
2011		mtx_assert(&w_mtx, MA_OWNED);
2012
2013	if (witness_watch == -1) {
2014		mtx_unlock_spin(&w_mtx);
2015		return (NULL);
2016	}
2017	if (STAILQ_EMPTY(&w_free)) {
2018		witness_watch = -1;
2019		mtx_unlock_spin(&w_mtx);
2020		printf("WITNESS: unable to allocate a new witness object\n");
2021		return (NULL);
2022	}
2023	w = STAILQ_FIRST(&w_free);
2024	STAILQ_REMOVE_HEAD(&w_free, w_list);
2025	w_free_cnt--;
2026	index = w->w_index;
2027	MPASS(index > 0 && index == w_max_used_index+1 &&
2028	    index < WITNESS_COUNT);
2029	bzero(w, sizeof(*w));
2030	w->w_index = index;
2031	if (index > w_max_used_index)
2032		w_max_used_index = index;
2033	return (w);
2034}
2035
2036static void
2037witness_free(struct witness *w)
2038{
2039
2040	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2041	w_free_cnt++;
2042}
2043
2044static struct lock_list_entry *
2045witness_lock_list_get(void)
2046{
2047	struct lock_list_entry *lle;
2048
2049	if (witness_watch == -1)
2050		return (NULL);
2051	mtx_lock_spin(&w_mtx);
2052	lle = w_lock_list_free;
2053	if (lle == NULL) {
2054		witness_watch = -1;
2055		mtx_unlock_spin(&w_mtx);
2056		printf("%s: witness exhausted\n", __func__);
2057		return (NULL);
2058	}
2059	w_lock_list_free = lle->ll_next;
2060	mtx_unlock_spin(&w_mtx);
2061	bzero(lle, sizeof(*lle));
2062	return (lle);
2063}
2064
2065static void
2066witness_lock_list_free(struct lock_list_entry *lle)
2067{
2068
2069	mtx_lock_spin(&w_mtx);
2070	lle->ll_next = w_lock_list_free;
2071	w_lock_list_free = lle;
2072	mtx_unlock_spin(&w_mtx);
2073}
2074
2075static struct lock_instance *
2076find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2077{
2078	struct lock_list_entry *lle;
2079	struct lock_instance *instance;
2080	int i;
2081
2082	for (lle = list; lle != NULL; lle = lle->ll_next)
2083		for (i = lle->ll_count - 1; i >= 0; i--) {
2084			instance = &lle->ll_children[i];
2085			if (instance->li_lock == lock)
2086				return (instance);
2087		}
2088	return (NULL);
2089}
2090
2091static void
2092witness_list_lock(struct lock_instance *instance,
2093    int (*prnt)(const char *fmt, ...))
2094{
2095	struct lock_object *lock;
2096
2097	lock = instance->li_lock;
2098	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2099	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2100	if (lock->lo_witness->w_name != lock->lo_name)
2101		prnt(" (%s)", lock->lo_witness->w_name);
2102	prnt(" r = %d (%p) locked @ %s:%d\n",
2103	    instance->li_flags & LI_RECURSEMASK, lock,
2104	    fixup_filename(instance->li_file), instance->li_line);
2105}
2106
2107#ifdef DDB
2108static int
2109witness_thread_has_locks(struct thread *td)
2110{
2111
2112	if (td->td_sleeplocks == NULL)
2113		return (0);
2114	return (td->td_sleeplocks->ll_count != 0);
2115}
2116
2117static int
2118witness_proc_has_locks(struct proc *p)
2119{
2120	struct thread *td;
2121
2122	FOREACH_THREAD_IN_PROC(p, td) {
2123		if (witness_thread_has_locks(td))
2124			return (1);
2125	}
2126	return (0);
2127}
2128#endif
2129
2130int
2131witness_list_locks(struct lock_list_entry **lock_list,
2132    int (*prnt)(const char *fmt, ...))
2133{
2134	struct lock_list_entry *lle;
2135	int i, nheld;
2136
2137	nheld = 0;
2138	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2139		for (i = lle->ll_count - 1; i >= 0; i--) {
2140			witness_list_lock(&lle->ll_children[i], prnt);
2141			nheld++;
2142		}
2143	return (nheld);
2144}
2145
2146/*
2147 * This is a bit risky at best.  We call this function when we have timed
2148 * out acquiring a spin lock, and we assume that the other CPU is stuck
2149 * with this lock held.  So, we go groveling around in the other CPU's
2150 * per-cpu data to try to find the lock instance for this spin lock to
2151 * see where it was last acquired.
2152 */
2153void
2154witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2155    int (*prnt)(const char *fmt, ...))
2156{
2157	struct lock_instance *instance;
2158	struct pcpu *pc;
2159
2160	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2161		return;
2162	pc = pcpu_find(owner->td_oncpu);
2163	instance = find_instance(pc->pc_spinlocks, lock);
2164	if (instance != NULL)
2165		witness_list_lock(instance, prnt);
2166}
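
/*
 * Illustrative sketch (not compiled here): the expected caller is the
 * spin-lock timeout path, along the lines of:
 *
 *	witness_display_spinlock(&m->lock_object, mtx_owner(m), printf);
 *
 * where "m" is the mutex whose acquisition timed out.
 */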
2167
2168void
2169witness_save(struct lock_object *lock, const char **filep, int *linep)
2170{
2171	struct lock_list_entry *lock_list;
2172	struct lock_instance *instance;
2173	struct lock_class *class;
2174
2175	/*
2176	 * This function is used independently in locking code to deal with
2177	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2178	 * Giant is gone.
2179	 */
2180	if (SCHEDULER_STOPPED())
2181		return;
2182	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2183	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2184		return;
2185	class = LOCK_CLASS(lock);
2186	if (class->lc_flags & LC_SLEEPLOCK)
2187		lock_list = curthread->td_sleeplocks;
2188	else {
2189		if (witness_skipspin)
2190			return;
2191		lock_list = PCPU_GET(spinlocks);
2192	}
2193	instance = find_instance(lock_list, lock);
2194	if (instance == NULL)
2195		panic("%s: lock (%s) %s not locked", __func__,
2196		    class->lc_name, lock->lo_name);
2197	*filep = instance->li_file;
2198	*linep = instance->li_line;
2199}
2200
2201void
2202witness_restore(struct lock_object *lock, const char *file, int line)
2203{
2204	struct lock_list_entry *lock_list;
2205	struct lock_instance *instance;
2206	struct lock_class *class;
2207
2208	/*
2209	 * This function is used independently in locking code to deal with
2210	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2211	 * Giant is gone.
2212	 */
2213	if (SCHEDULER_STOPPED())
2214		return;
2215	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2216	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2217		return;
2218	class = LOCK_CLASS(lock);
2219	if (class->lc_flags & LC_SLEEPLOCK)
2220		lock_list = curthread->td_sleeplocks;
2221	else {
2222		if (witness_skipspin)
2223			return;
2224		lock_list = PCPU_GET(spinlocks);
2225	}
2226	instance = find_instance(lock_list, lock);
2227	if (instance == NULL)
2228		panic("%s: lock (%s) %s not locked", __func__,
2229		    class->lc_name, lock->lo_name);
2230	lock->lo_witness->w_file = file;
2231	lock->lo_witness->w_line = line;
2232	instance->li_file = file;
2233	instance->li_line = line;
2234}
2235
2236void
2237witness_assert(const struct lock_object *lock, int flags, const char *file,
2238    int line)
2239{
2240#ifdef INVARIANT_SUPPORT
2241	struct lock_instance *instance;
2242	struct lock_class *class;
2243
2244	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2245		return;
2246	class = LOCK_CLASS(lock);
2247	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2248		instance = find_instance(curthread->td_sleeplocks, lock);
2249	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2250		instance = find_instance(PCPU_GET(spinlocks), lock);
2251	else {
2252		panic("Lock (%s) %s is not sleep or spin!",
2253		    class->lc_name, lock->lo_name);
2254	}
2255	switch (flags) {
2256	case LA_UNLOCKED:
2257		if (instance != NULL)
2258			panic("Lock (%s) %s locked @ %s:%d.",
2259			    class->lc_name, lock->lo_name,
2260			    fixup_filename(file), line);
2261		break;
2262	case LA_LOCKED:
2263	case LA_LOCKED | LA_RECURSED:
2264	case LA_LOCKED | LA_NOTRECURSED:
2265	case LA_SLOCKED:
2266	case LA_SLOCKED | LA_RECURSED:
2267	case LA_SLOCKED | LA_NOTRECURSED:
2268	case LA_XLOCKED:
2269	case LA_XLOCKED | LA_RECURSED:
2270	case LA_XLOCKED | LA_NOTRECURSED:
2271		if (instance == NULL) {
2272			panic("Lock (%s) %s not locked @ %s:%d.",
2273			    class->lc_name, lock->lo_name,
2274			    fixup_filename(file), line);
2275			break;
2276		}
2277		if ((flags & LA_XLOCKED) != 0 &&
2278		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2279			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2280			    class->lc_name, lock->lo_name,
2281			    fixup_filename(file), line);
2282		if ((flags & LA_SLOCKED) != 0 &&
2283		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2284			panic("Lock (%s) %s exclusively locked @ %s:%d.",
2285			    class->lc_name, lock->lo_name,
2286			    fixup_filename(file), line);
2287		if ((flags & LA_RECURSED) != 0 &&
2288		    (instance->li_flags & LI_RECURSEMASK) == 0)
2289			panic("Lock (%s) %s not recursed @ %s:%d.",
2290			    class->lc_name, lock->lo_name,
2291			    fixup_filename(file), line);
2292		if ((flags & LA_NOTRECURSED) != 0 &&
2293		    (instance->li_flags & LI_RECURSEMASK) != 0)
2294			panic("Lock (%s) %s recursed @ %s:%d.",
2295			    class->lc_name, lock->lo_name,
2296			    fixup_filename(file), line);
2297		break;
2298	default:
2299		panic("Invalid lock assertion at %s:%d.",
2300		    fixup_filename(file), line);
2301
2302	}
2303#endif	/* INVARIANT_SUPPORT */
2304}
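
/*
 * Illustrative sketch (not compiled here): lock implementations defer their
 * ownership assertions here, e.g. an sx_assert(&sx, SA_SLOCKED) check ends
 * up roughly as:
 *
 *	witness_assert(&sx->lock_object, LA_SLOCKED, file, line);
 */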
2305
2306static void
2307witness_setflag(struct lock_object *lock, int flag, int set)
2308{
2309	struct lock_list_entry *lock_list;
2310	struct lock_instance *instance;
2311	struct lock_class *class;
2312
2313	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2314		return;
2315	class = LOCK_CLASS(lock);
2316	if (class->lc_flags & LC_SLEEPLOCK)
2317		lock_list = curthread->td_sleeplocks;
2318	else {
2319		if (witness_skipspin)
2320			return;
2321		lock_list = PCPU_GET(spinlocks);
2322	}
2323	instance = find_instance(lock_list, lock);
2324	if (instance == NULL)
2325		panic("%s: lock (%s) %s not locked", __func__,
2326		    class->lc_name, lock->lo_name);
2327
2328	if (set)
2329		instance->li_flags |= flag;
2330	else
2331		instance->li_flags &= ~flag;
2332}
2333
2334void
2335witness_norelease(struct lock_object *lock)
2336{
2337
2338	witness_setflag(lock, LI_NORELEASE, 1);
2339}
2340
2341void
2342witness_releaseok(struct lock_object *lock)
2343{
2344
2345	witness_setflag(lock, LI_NORELEASE, 0);
2346}
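
/*
 * Illustrative sketch (hypothetical lock "m"): bracketing a region in which
 * the lock must not be released; a release of "m" in between is flagged at
 * unlock time:
 *
 *	witness_norelease(&m->lock_object);
 *	...
 *	witness_releaseok(&m->lock_object);
 */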
2347
2348#ifdef DDB
2349static void
2350witness_ddb_list(struct thread *td)
2351{
2352
2353	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2354	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2355
2356	if (witness_watch < 1)
2357		return;
2358
2359	witness_list_locks(&td->td_sleeplocks, db_printf);
2360
2361	/*
2362	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2363	 * if td is currently executing on some other CPU and holds spin locks
2364	 * as we won't display those locks.  If we had a MI way of getting
2365	 * the per-cpu data for a given cpu then we could use
2366	 * td->td_oncpu to get the list of spinlocks for this thread
2367	 * and "fix" this.
2368	 *
2369	 * That still wouldn't really fix this unless we locked the scheduler
2370	 * lock or stopped the other CPU to make sure it wasn't changing the
2371	 * list out from under us.  It is probably best to just not try to
2372	 * handle threads on other CPU's for now.
2373	 */
2374	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2375		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2376}
2377
2378DB_SHOW_COMMAND(locks, db_witness_list)
2379{
2380	struct thread *td;
2381
2382	if (have_addr)
2383		td = db_lookup_thread(addr, TRUE);
2384	else
2385		td = kdb_thread;
2386	witness_ddb_list(td);
2387}
2388
2389DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2390{
2391	struct thread *td;
2392	struct proc *p;
2393
2394	/*
2395	 * It would be nice to list only threads and processes that actually
2396	 * hold sleep locks, but that information is currently not exported
2397	 * by WITNESS.
2398	 */
2399	FOREACH_PROC_IN_SYSTEM(p) {
2400		if (!witness_proc_has_locks(p))
2401			continue;
2402		FOREACH_THREAD_IN_PROC(p, td) {
2403			if (!witness_thread_has_locks(td))
2404				continue;
2405			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2406			    p->p_comm, td, td->td_tid);
2407			witness_ddb_list(td);
2408			if (db_pager_quit)
2409				return;
2410		}
2411	}
2412}
2413DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2414
2415DB_SHOW_COMMAND(witness, db_witness_display)
2416{
2417
2418	witness_ddb_display(db_printf);
2419}
2420#endif
2421
2422static int
2423sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2424{
2425	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2426	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2427	struct sbuf *sb;
2428	u_int w_rmatrix1, w_rmatrix2;
2429	int error, generation, i, j;
2430
2431	tmp_data1 = NULL;
2432	tmp_data2 = NULL;
2433	tmp_w1 = NULL;
2434	tmp_w2 = NULL;
2435	if (witness_watch < 1) {
2436		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2437		return (error);
2438	}
2439	if (witness_cold) {
2440		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2441		return (error);
2442	}
2443	error = 0;
2444	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2445	if (sb == NULL)
2446		return (ENOMEM);
2447
2448	/* Allocate and init temporary storage space. */
2449	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2450	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2451	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2452	    M_WAITOK | M_ZERO);
2453	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2454	    M_WAITOK | M_ZERO);
2455	stack_zero(&tmp_data1->wlod_stack);
2456	stack_zero(&tmp_data2->wlod_stack);
2457
2458restart:
2459	mtx_lock_spin(&w_mtx);
2460	generation = w_generation;
2461	mtx_unlock_spin(&w_mtx);
2462	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2463	    w_lohash.wloh_count);
2464	for (i = 1; i <= w_max_used_index; i++) {
2465		mtx_lock_spin(&w_mtx);
2466		if (generation != w_generation) {
2467			mtx_unlock_spin(&w_mtx);
2468
2469			/* The graph has changed, try again. */
2470			req->oldidx = 0;
2471			sbuf_clear(sb);
2472			goto restart;
2473		}
2474
2475		w1 = &w_data[i];
2476		if (w1->w_reversed == 0) {
2477			mtx_unlock_spin(&w_mtx);
2478			continue;
2479		}
2480
2481		/* Copy w1 locally so we can release the spin lock. */
2482		*tmp_w1 = *w1;
2483		mtx_unlock_spin(&w_mtx);
2484
2485		if (tmp_w1->w_reversed == 0)
2486			continue;
2487		for (j = 1; j <= w_max_used_index; j++) {
2488			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2489				continue;
2490
2491			mtx_lock_spin(&w_mtx);
2492			if (generation != w_generation) {
2493				mtx_unlock_spin(&w_mtx);
2494
2495				/* The graph has changed, try again. */
2496				req->oldidx = 0;
2497				sbuf_clear(sb);
2498				goto restart;
2499			}
2500
2501			w2 = &w_data[j];
2502			data1 = witness_lock_order_get(w1, w2);
2503			data2 = witness_lock_order_get(w2, w1);
2504
2505			/*
2506			 * Copy information locally so we can release the
2507			 * spin lock.
2508			 */
2509			*tmp_w2 = *w2;
2510			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2511			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2512
2513			if (data1) {
2514				stack_zero(&tmp_data1->wlod_stack);
2515				stack_copy(&data1->wlod_stack,
2516				    &tmp_data1->wlod_stack);
2517			}
2518			if (data2 && data2 != data1) {
2519				stack_zero(&tmp_data2->wlod_stack);
2520				stack_copy(&data2->wlod_stack,
2521				    &tmp_data2->wlod_stack);
2522			}
2523			mtx_unlock_spin(&w_mtx);
2524
2525			sbuf_printf(sb,
2526	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2527			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2528			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2529#if 0
2530			sbuf_printf(sb,
2531			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2532			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2533			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2534#endif
2535			if (data1) {
2536				sbuf_printf(sb,
2537			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2538				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2539				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2540				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2541				sbuf_printf(sb, "\n");
2542			}
2543			if (data2 && data2 != data1) {
2544				sbuf_printf(sb,
2545			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2546				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2547				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2548				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2549				sbuf_printf(sb, "\n");
2550			}
2551		}
2552	}
2553	mtx_lock_spin(&w_mtx);
2554	if (generation != w_generation) {
2555		mtx_unlock_spin(&w_mtx);
2556
2557		/*
2558		 * The graph changed while we were printing stack data,
2559		 * try again.
2560		 */
2561		req->oldidx = 0;
2562		sbuf_clear(sb);
2563		goto restart;
2564	}
2565	mtx_unlock_spin(&w_mtx);
2566
2567	/* Free temporary storage space. */
2568	free(tmp_data1, M_TEMP);
2569	free(tmp_data2, M_TEMP);
2570	free(tmp_w1, M_TEMP);
2571	free(tmp_w2, M_TEMP);
2572
2573	sbuf_finish(sb);
2574	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2575	sbuf_delete(sb);
2576
2577	return (error);
2578}
2579
2580static int
2581sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2582{
2583	struct witness *w;
2584	struct sbuf *sb;
2585	int error;
2586
2587	if (witness_watch < 1) {
2588		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2589		return (error);
2590	}
2591	if (witness_cold) {
2592		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2593		return (error);
2594	}
2596
2597	error = sysctl_wire_old_buffer(req, 0);
2598	if (error != 0)
2599		return (error);
2600	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2601	if (sb == NULL)
2602		return (ENOMEM);
2603	sbuf_printf(sb, "\n");
2604
2605	mtx_lock_spin(&w_mtx);
2606	STAILQ_FOREACH(w, &w_all, w_list)
2607		w->w_displayed = 0;
2608	STAILQ_FOREACH(w, &w_all, w_list)
2609		witness_add_fullgraph(sb, w);
2610	mtx_unlock_spin(&w_mtx);
2611
2612	/*
2613	 * Close the sbuf and return to userland.
2614	 */
2615	error = sbuf_finish(sb);
2616	sbuf_delete(sb);
2617
2618	return (error);
2619}
2620
2621static int
2622sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2623{
2624	int error, value;
2625
2626	value = witness_watch;
2627	error = sysctl_handle_int(oidp, &value, 0, req);
2628	if (error != 0 || req->newptr == NULL)
2629		return (error);
2630	if (value > 1 || value < -1 ||
2631	    (witness_watch == -1 && value != witness_watch))
2632		return (EINVAL);
2633	witness_watch = value;
2634	return (0);
2635}
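
/*
 * Illustrative examples of the checks above: writing 2 or -2 to the sysctl
 * fails with EINVAL, and once witness_watch is -1 (permanently disabled) no
 * other value is accepted; lowering the level, e.g. from 1 to 0, succeeds.
 */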
2636
2637static void
2638witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2639{
2640	int i;
2641
2642	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2643		return;
2644	w->w_displayed = 1;
2645
2646	WITNESS_INDEX_ASSERT(w->w_index);
2647	for (i = 1; i <= w_max_used_index; i++) {
2648		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2649			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2650			    w_data[i].w_name);
2651			witness_add_fullgraph(sb, &w_data[i]);
2652		}
2653	}
2654}
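
/*
 * Illustrative output (hypothetical witness names): each parent/child edge
 * is emitted as one quoted pair per line, for example:
 *
 *	"lockA","lockB"
 *	"lockA","lockC"
 */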
2655
2656/*
2657 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2658 * interprets the key as a string and reads until the null
2659 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2660 * hash value computed from the key.
2661 */
2662static uint32_t
2663witness_hash_djb2(const uint8_t *key, uint32_t size)
2664{
2665	unsigned int hash = 5381;
2666	int i;
2667
2668	/* hash = hash * 33 + key[i] */
2669	if (size)
2670		for (i = 0; i < size; i++)
2671			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2672	else
2673		for (i = 0; key[i] != 0; i++)
2674			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2675
2676	return (hash);
2677}
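
/*
 * Illustrative examples mirroring the callers below: hashing a
 * NUL-terminated name and a fixed-size binary key, respectively:
 *
 *	witness_hash_djb2(name, 0) % w_hash.wh_size
 *	witness_hash_djb2((const char *)&key, sizeof(key)) % w_lohash.wloh_size
 */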
2678
2680/*
2681 * Initializes the two witness hash tables. Called exactly once from
2682 * witness_initialize().
2683 */
2684static void
2685witness_init_hash_tables(void)
2686{
2687	int i;
2688
2689	MPASS(witness_cold);
2690
2691	/* Initialize the hash tables. */
2692	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2693		w_hash.wh_array[i] = NULL;
2694
2695	w_hash.wh_size = WITNESS_HASH_SIZE;
2696	w_hash.wh_count = 0;
2697
2698	/* Initialize the lock order data hash. */
2699	w_lofree = NULL;
2700	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2701		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2702		w_lodata[i].wlod_next = w_lofree;
2703		w_lofree = &w_lodata[i];
2704	}
2705	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2706	w_lohash.wloh_count = 0;
2707	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2708		w_lohash.wloh_array[i] = NULL;
2709}
2710
2711static struct witness *
2712witness_hash_get(const char *key)
2713{
2714	struct witness *w;
2715	uint32_t hash;
2716
2717	MPASS(key != NULL);
2718	if (witness_cold == 0)
2719		mtx_assert(&w_mtx, MA_OWNED);
2720	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2721	w = w_hash.wh_array[hash];
2722	while (w != NULL) {
2723		if (strcmp(w->w_name, key) == 0)
2724			goto out;
2725		w = w->w_hash_next;
2726	}
2727
2728out:
2729	return (w);
2730}
2731
2732static void
2733witness_hash_put(struct witness *w)
2734{
2735	uint32_t hash;
2736
2737	MPASS(w != NULL);
2738	MPASS(w->w_name != NULL);
2739	if (witness_cold == 0)
2740		mtx_assert(&w_mtx, MA_OWNED);
2741	KASSERT(witness_hash_get(w->w_name) == NULL,
2742	    ("%s: trying to add a hash entry that already exists!", __func__));
2743	KASSERT(w->w_hash_next == NULL,
2744	    ("%s: w->w_hash_next != NULL", __func__));
2745
2746	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2747	w->w_hash_next = w_hash.wh_array[hash];
2748	w_hash.wh_array[hash] = w;
2749	w_hash.wh_count++;
2750}
2751
2753static struct witness_lock_order_data *
2754witness_lock_order_get(struct witness *parent, struct witness *child)
2755{
2756	struct witness_lock_order_data *data = NULL;
2757	struct witness_lock_order_key key;
2758	unsigned int hash;
2759
2760	MPASS(parent != NULL && child != NULL);
2761	key.from = parent->w_index;
2762	key.to = child->w_index;
2763	WITNESS_INDEX_ASSERT(key.from);
2764	WITNESS_INDEX_ASSERT(key.to);
2765	if ((w_rmatrix[parent->w_index][child->w_index]
2766	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2767		goto out;
2768
2769	hash = witness_hash_djb2((const char*)&key,
2770	    sizeof(key)) % w_lohash.wloh_size;
2771	data = w_lohash.wloh_array[hash];
2772	while (data != NULL) {
2773		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2774			break;
2775		data = data->wlod_next;
2776	}
2777
2778out:
2779	return (data);
2780}
2781
2782/*
2783 * Verify that parent and child have a known relationship, are not the same,
2784 * and child is actually a child of parent.  This is done without w_mtx
2785 * to avoid contention in the common case.
2786 */
2787static int
2788witness_lock_order_check(struct witness *parent, struct witness *child)
2789{
2790
2791	if (parent != child &&
2792	    w_rmatrix[parent->w_index][child->w_index]
2793	    & WITNESS_LOCK_ORDER_KNOWN &&
2794	    isitmychild(parent, child))
2795		return (1);
2796
2797	return (0);
2798}
2799
2800static int
2801witness_lock_order_add(struct witness *parent, struct witness *child)
2802{
2803	struct witness_lock_order_data *data = NULL;
2804	struct witness_lock_order_key key;
2805	unsigned int hash;
2806
2807	MPASS(parent != NULL && child != NULL);
2808	key.from = parent->w_index;
2809	key.to = child->w_index;
2810	WITNESS_INDEX_ASSERT(key.from);
2811	WITNESS_INDEX_ASSERT(key.to);
2812	if (w_rmatrix[parent->w_index][child->w_index]
2813	    & WITNESS_LOCK_ORDER_KNOWN)
2814		return (1);
2815
2816	hash = witness_hash_djb2((const char*)&key,
2817	    sizeof(key)) % w_lohash.wloh_size;
2818	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2819	data = w_lofree;
2820	if (data == NULL)
2821		return (0);
2822	w_lofree = data->wlod_next;
2823	data->wlod_next = w_lohash.wloh_array[hash];
2824	data->wlod_key = key;
2825	w_lohash.wloh_array[hash] = data;
2826	w_lohash.wloh_count++;
2827	stack_zero(&data->wlod_stack);
2828	stack_save(&data->wlod_stack);
2829	return (1);
2830}
2831
2832/* Call this whenever the structure of the witness graph changes. */
2833static void
2834witness_increment_graph_generation(void)
2835{
2836
2837	if (witness_cold == 0)
2838		mtx_assert(&w_mtx, MA_OWNED);
2839	w_generation++;
2840}
2841
2842#ifdef KDB
2843static void
2844_witness_debugger(int cond, const char *msg)
2845{
2846
2847	if (witness_trace && cond)
2848		kdb_backtrace();
2849	if (witness_kdb && cond)
2850		kdb_enter(KDB_WHY_WITNESS, msg);
2851}
2852#endif
2853