1/*-
2 * Copyright (c) 2008 Isilon Systems, Inc.
3 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4 * Copyright (c) 1998 Berkeley Software Design, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Berkeley Software Design Inc's name may not be used to endorse or
16 *    promote products derived from this software without specific prior
17 *    written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33 */
34
35/*
36 * Implementation of the `witness' lock verifier.  Originally implemented for
37 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
38 * classes in FreeBSD.
39 */
40
41/*
42 *	Main Entry: witness
43 *	Pronunciation: 'wit-n&s
44 *	Function: noun
45 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
46 *	    testimony, witness, from 2wit
47 *	Date: before 12th century
48 *	1 : attestation of a fact or event : TESTIMONY
49 *	2 : one that gives evidence; specifically : one who testifies in
50 *	    a cause or before a judicial tribunal
51 *	3 : one asked to be present at a transaction so as to be able to
52 *	    testify to its having taken place
53 *	4 : one who has personal knowledge of something
54 *	5 a : something serving as evidence or proof : SIGN
55 *	  b : public affirmation by word or example of usually
56 *	      religious faith or conviction <the heroic witness to divine
57 *	      life -- Pilot>
58 *	6 capitalized : a member of the Jehovah's Witnesses
59 */
60
61/*
62 * Special rules concerning Giant and lock orders:
63 *
64 * 1) Giant must be acquired before any other mutexes.  Stated another way,
65 *    no other mutex may be held when Giant is acquired.
66 *
67 * 2) Giant must be released when blocking on a sleepable lock.
68 *
69 * This rule is less obvious, but is a result of Giant providing the same
70 * semantics as spl().  Basically, when a thread sleeps, it must release
71 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
72 * 2).
73 *
74 * 3) Giant may be acquired before or after sleepable locks.
75 *
76 * This rule is also not quite as obvious.  Giant may be acquired after
77 * a sleepable lock because it is a non-sleepable lock and non-sleepable
78 * locks may always be acquired while holding a sleepable lock.  The second
79 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
80 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
81 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
82 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
83 * execute.  Thus, acquiring Giant both before and after a sleepable lock
84 * will not result in a lock order reversal.
85 */
86
87#include <sys/cdefs.h>
88__FBSDID("$FreeBSD$");
89
90#include "opt_ddb.h"
91#include "opt_hwpmc_hooks.h"
92#include "opt_stack.h"
93#include "opt_witness.h"
94
95#include <sys/param.h>
96#include <sys/bus.h>
97#include <sys/kdb.h>
98#include <sys/kernel.h>
99#include <sys/ktr.h>
100#include <sys/lock.h>
101#include <sys/malloc.h>
102#include <sys/mutex.h>
103#include <sys/priv.h>
104#include <sys/proc.h>
105#include <sys/sbuf.h>
106#include <sys/sched.h>
107#include <sys/stack.h>
108#include <sys/sysctl.h>
109#include <sys/systm.h>
110
111#ifdef DDB
112#include <ddb/ddb.h>
113#endif
114
115#include <machine/stdarg.h>
116
117#if !defined(DDB) && !defined(STACK)
118#error "DDB or STACK options are required for WITNESS"
119#endif
120
121/* Note that these traces do not work with KTR_ALQ. */
122#if 0
123#define	KTR_WITNESS	KTR_SUBSYS
124#else
125#define	KTR_WITNESS	0
126#endif
127
128#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
129#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
130#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
131
132/* Define this to check for blessed mutexes */
133#undef BLESSING
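/*
 * To use lock order blessing, change the #undef above to "#define BLESSING"
 * and populate the blessed_list[] table defined later in this file; when
 * BLESSING is not defined, the blessed() checks are compiled out entirely.
 */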
134
135#define	WITNESS_COUNT 		1024
136#define	WITNESS_CHILDCOUNT 	(WITNESS_COUNT * 4)
137#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
138#define	WITNESS_PENDLIST	768
139
140/* Allocate 256 KB of stack data space */
141#define	WITNESS_LO_DATA_COUNT	2048
142
143/* Prime, gives load factor of ~2 at full load */
144#define	WITNESS_LO_HASH_SIZE	1021
145
146/*
147 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
148 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
149 * probably be safe for the most part, but it's still a SWAG.
150 */
151#define	LOCK_NCHILDREN	5
152#define	LOCK_CHILDCOUNT	2048
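/*
 * Together these provide LOCK_CHILDCOUNT * LOCK_NCHILDREN (2048 * 5 = 10240)
 * statically allocated lock_instance slots, shared among all threads and
 * CPUs via the lock_list_entry free pool below.
 */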
153
154#define	MAX_W_NAME	64
155
156#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
157#define	FULLGRAPH_SBUF_SIZE	512
158
159/*
160 * These flags go in the witness relationship matrix and describe the
161 * relationship between any two struct witness objects.
162 */
163#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
164#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
165#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
166#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
167#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
168#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
169#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
170#define	WITNESS_RELATED_MASK						\
171	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
172#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
173					  * observed. */
174#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
175#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
176#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
177
178/* Descendant to ancestor flags */
179#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
180
181/* Ancestor to descendant flags */
182#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
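/*
 * Since the parent/ancestor bits are exactly the child/descendant bits
 * shifted by two, these conversions are simple shifts; for example,
 * WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD and
 * WITNESS_DTOA(WITNESS_CHILD) == WITNESS_PARENT.
 */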
183
184#define	WITNESS_INDEX_ASSERT(i)						\
185	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
186
187static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
188
189/*
190 * Lock instances.  A lock instance is the data associated with a lock while
191 * it is held by witness.  For example, a lock instance will hold the
192 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread list.
194 */
195struct lock_instance {
196	struct lock_object	*li_lock;
197	const char		*li_file;
198	int			li_line;
199	u_int			li_flags;
200};
201
202/*
203 * A simple list type used to build the list of locks held by a thread
204 * or CPU.  We can't simply embed the list in struct lock_object since a
205 * lock may be held by more than one thread if it is a shared lock.  Locks
206 * are added to the head of the list, so we fill up each list entry from
207 * "the back" logically.  To ease some of the arithmetic, we actually fill
208 * in each list entry the normal way (children[0] then children[1], etc.) but
209 * when we traverse the list we read children[count-1] as the first entry
210 * down to children[0] as the final entry.
211 */
212struct lock_list_entry {
213	struct lock_list_entry	*ll_next;
214	struct lock_instance	ll_children[LOCK_NCHILDREN];
215	u_int			ll_count;
216};
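/*
 * Illustrative traversal (the idiom used by witness_warn() and
 * witness_thread_exit() below): held locks are visited newest-first by
 * walking each lock_list_entry from ll_count - 1 down to 0:
 *
 *	for (lle = list; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			... &lle->ll_children[i] ...
 */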
217
218/*
219 * The main witness structure. One of these per named lock type in the system
220 * (for example, "vnode interlock").
221 */
222struct witness {
223	char  			w_name[MAX_W_NAME];
224	uint32_t 		w_index;  /* Index in the relationship matrix */
225	struct lock_class	*w_class;
226	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
227	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
228	struct witness		*w_hash_next; /* Linked list in hash buckets. */
229	const char		*w_file; /* File where last acquired */
230	uint32_t 		w_line; /* Line where last acquired */
231	uint32_t 		w_refcount;
232	uint16_t 		w_num_ancestors; /* direct/indirect
233						  * ancestor count */
234	uint16_t 		w_num_descendants; /* direct/indirect
235						    * descendant count */
236	int16_t 		w_ddb_level;
237	unsigned		w_displayed:1;
238	unsigned		w_reversed:1;
239};
240
241STAILQ_HEAD(witness_list, witness);
242
243/*
244 * The witness hash table. Keys are witness names (const char *), elements are
245 * witness objects (struct witness *).
246 */
247struct witness_hash {
248	struct witness	*wh_array[WITNESS_HASH_SIZE];
249	uint32_t	wh_size;
250	uint32_t	wh_count;
251};
252
253/*
254 * Key type for the lock order data hash table.
255 */
256struct witness_lock_order_key {
257	uint16_t	from;
258	uint16_t	to;
259};
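/*
 * A key names an ordered pair of witnesses by their w_index values, so
 * (from, to) and (to, from) are distinct entries in the hash table.
 */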
260
261struct witness_lock_order_data {
262	struct stack			wlod_stack;
263	struct witness_lock_order_key	wlod_key;
264	struct witness_lock_order_data	*wlod_next;
265};
266
267/*
268 * The witness lock order data hash table. Keys are witness index tuples
269 * (struct witness_lock_order_key), elements are lock order data objects
270 * (struct witness_lock_order_data).
271 */
272struct witness_lock_order_hash {
273	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
274	u_int	wloh_size;
275	u_int	wloh_count;
276};
277
278#ifdef BLESSING
279struct witness_blessed {
280	const char	*b_lock1;
281	const char	*b_lock2;
282};
283#endif
284
285struct witness_pendhelp {
286	const char		*wh_type;
287	struct lock_object	*wh_lock;
288};
289
290struct witness_order_list_entry {
291	const char		*w_name;
292	struct lock_class	*w_class;
293};
294
295/*
296 * Returns 0 if one of the locks is a spin lock and the other is not.
297 * Returns 1 otherwise.
298 */
299static __inline int
300witness_lock_type_equal(struct witness *w1, struct witness *w2)
301{
302
303	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
304		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
305}
306
307static __inline int
308witness_lock_order_key_equal(const struct witness_lock_order_key *a,
309    const struct witness_lock_order_key *b)
310{
311
312	return (a->from == b->from && a->to == b->to);
313}
314
315static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
316		    const char *fname);
317#ifdef KDB
318static void	_witness_debugger(int cond, const char *msg);
319#endif
320static void	adopt(struct witness *parent, struct witness *child);
321#ifdef BLESSING
322static int	blessed(struct witness *, struct witness *);
323#endif
324static void	depart(struct witness *w);
325static struct witness	*enroll(const char *description,
326			    struct lock_class *lock_class);
327static struct lock_instance	*find_instance(struct lock_list_entry *list,
328				    struct lock_object *lock);
329static int	isitmychild(struct witness *parent, struct witness *child);
330static int	isitmydescendant(struct witness *parent, struct witness *child);
331static void	itismychild(struct witness *parent, struct witness *child);
332static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
333static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
334static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
335static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
336#ifdef DDB
337static void	witness_ddb_compute_levels(void);
338static void	witness_ddb_display(int(*)(const char *fmt, ...));
339static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
340		    struct witness *, int indent);
341static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
342		    struct witness_list *list);
343static void	witness_ddb_level_descendants(struct witness *parent, int l);
344static void	witness_ddb_list(struct thread *td);
345#endif
346static void	witness_free(struct witness *m);
347static struct witness	*witness_get(void);
348static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
349static struct witness	*witness_hash_get(const char *key);
350static void	witness_hash_put(struct witness *w);
351static void	witness_init_hash_tables(void);
352static void	witness_increment_graph_generation(void);
353static void	witness_lock_list_free(struct lock_list_entry *lle);
354static struct lock_list_entry	*witness_lock_list_get(void);
355static int	witness_lock_order_add(struct witness *parent,
356		    struct witness *child);
357static int	witness_lock_order_check(struct witness *parent,
358		    struct witness *child);
359static struct witness_lock_order_data	*witness_lock_order_get(
360					    struct witness *parent,
361					    struct witness *child);
362static void	witness_list_lock(struct lock_instance *instance,
363		    int (*prnt)(const char *fmt, ...));
364static void	witness_setflag(struct lock_object *lock, int flag, int set);
365
366#ifdef KDB
367#define	witness_debugger(c)	_witness_debugger(c, __func__)
368#else
369#define	witness_debugger(c)
370#endif
371
372static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
373    "Witness Locking");
374
375/*
376 * If set to 0, lock order checking is disabled.  If set to -1,
377 * witness is completely disabled.  Otherwise witness performs full
378 * lock order checking for all locks.  At runtime, lock order checking
379 * may be toggled.  However, witness cannot be reenabled once it is
380 * completely disabled.
381 */
382static int witness_watch = 1;
383TUNABLE_INT("debug.witness.watch", &witness_watch);
384SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
385    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
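/*
 * For example, lock order checking can be turned off at runtime with
 * "sysctl debug.witness.watch=0", or witness disabled entirely (and
 * irreversibly) with "sysctl debug.witness.watch=-1"; the same value may
 * also be preset from the loader via the debug.witness.watch tunable.
 */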
386
387#ifdef KDB
388/*
389 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into the kernel debugger when:
391 *	- a lock hierarchy violation occurs
392 *	- locks are held when going to sleep.
393 */
394#ifdef WITNESS_KDB
395int	witness_kdb = 1;
396#else
397int	witness_kdb = 0;
398#endif
399TUNABLE_INT("debug.witness.kdb", &witness_kdb);
400SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
401
402/*
403 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
405 *	- a lock hierarchy violation occurs
406 *	- locks are held when going to sleep.
407 */
408int	witness_trace = 1;
409TUNABLE_INT("debug.witness.trace", &witness_trace);
410SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
411#endif /* KDB */
412
413#ifdef WITNESS_SKIPSPIN
414int	witness_skipspin = 1;
415#else
416int	witness_skipspin = 0;
417#endif
418TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
419SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
420    0, "");
421
422/*
423 * Call this to print out the relations between locks.
424 */
425SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
426    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
427
/*
 * Call this to print out the stacks recorded for bad (reversed) lock orders.
 */
431SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
432    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
433
434static struct mtx w_mtx;
435
436/* w_list */
437static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
438static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
439
440/* w_typelist */
441static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
442static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
443
444/* lock list */
445static struct lock_list_entry *w_lock_list_free = NULL;
446static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
447static u_int pending_cnt;
448
449static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
450SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
451SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
452SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
453    "");
454
455static struct witness *w_data;
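/*
 * The lock order relationship matrix: a (WITNESS_COUNT + 1) x
 * (WITNESS_COUNT + 1) array of the WITNESS_* relationship flags above,
 * indexed by witness w_index (index 0 is deliberately left unused), or
 * roughly 1 MB of static data at the default WITNESS_COUNT of 1024.
 */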
456static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
457static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
458static struct witness_hash w_hash;	/* The witness hash table. */
459
460/* The lock order data hash */
461static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
462static struct witness_lock_order_data *w_lofree = NULL;
463static struct witness_lock_order_hash w_lohash;
464static int w_max_used_index = 0;
465static unsigned int w_generation = 0;
466static const char w_notrunning[] = "Witness not running\n";
467static const char w_stillcold[] = "Witness is still cold\n";
468
469
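/*
 * Statically-known lock orders.  Each NULL-terminated group below is
 * processed by witness_initialize(): every named lock is enrolled and made
 * a direct parent of the lock that follows it in the group, so each group
 * reads top-to-bottom as "acquired first" to "acquired last".
 */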
470static struct witness_order_list_entry order_lists[] = {
471	/*
472	 * sx locks
473	 */
474	{ "proctree", &lock_class_sx },
475	{ "allproc", &lock_class_sx },
476	{ "allprison", &lock_class_sx },
477	{ NULL, NULL },
478	/*
479	 * Various mutexes
480	 */
481	{ "Giant", &lock_class_mtx_sleep },
482	{ "pipe mutex", &lock_class_mtx_sleep },
483	{ "sigio lock", &lock_class_mtx_sleep },
484	{ "process group", &lock_class_mtx_sleep },
485	{ "process lock", &lock_class_mtx_sleep },
486	{ "session", &lock_class_mtx_sleep },
487	{ "uidinfo hash", &lock_class_rw },
488#ifdef	HWPMC_HOOKS
489	{ "pmc-sleep", &lock_class_mtx_sleep },
490#endif
491	{ "time lock", &lock_class_mtx_sleep },
492	{ NULL, NULL },
493	/*
494	 * Sockets
495	 */
496	{ "accept", &lock_class_mtx_sleep },
497	{ "so_snd", &lock_class_mtx_sleep },
498	{ "so_rcv", &lock_class_mtx_sleep },
499	{ "sellck", &lock_class_mtx_sleep },
500	{ NULL, NULL },
501	/*
502	 * Routing
503	 */
504	{ "so_rcv", &lock_class_mtx_sleep },
505	{ "radix node head", &lock_class_rw },
506	{ "rtentry", &lock_class_mtx_sleep },
507	{ "ifaddr", &lock_class_mtx_sleep },
508	{ NULL, NULL },
509	/*
510	 * IPv4 multicast:
511	 * protocol locks before interface locks, after UDP locks.
512	 */
513	{ "udpinp", &lock_class_rw },
514	{ "in_multi_mtx", &lock_class_mtx_sleep },
515	{ "igmp_mtx", &lock_class_mtx_sleep },
516	{ "if_addr_mtx", &lock_class_mtx_sleep },
517	{ NULL, NULL },
518	/*
519	 * IPv6 multicast:
520	 * protocol locks before interface locks, after UDP locks.
521	 */
522	{ "udpinp", &lock_class_rw },
523	{ "in6_multi_mtx", &lock_class_mtx_sleep },
524	{ "mld_mtx", &lock_class_mtx_sleep },
525	{ "if_addr_mtx", &lock_class_mtx_sleep },
526	{ NULL, NULL },
527	/*
528	 * UNIX Domain Sockets
529	 */
530	{ "unp_global_rwlock", &lock_class_rw },
531	{ "unp_list_lock", &lock_class_mtx_sleep },
532	{ "unp", &lock_class_mtx_sleep },
533	{ "so_snd", &lock_class_mtx_sleep },
534	{ NULL, NULL },
535	/*
536	 * UDP/IP
537	 */
538	{ "udp", &lock_class_rw },
539	{ "udpinp", &lock_class_rw },
540	{ "so_snd", &lock_class_mtx_sleep },
541	{ NULL, NULL },
542	/*
543	 * TCP/IP
544	 */
545	{ "tcp", &lock_class_rw },
546	{ "tcpinp", &lock_class_rw },
547	{ "so_snd", &lock_class_mtx_sleep },
548	{ NULL, NULL },
549	/*
550	 * netatalk
551	 */
552	{ "ddp_list_mtx", &lock_class_mtx_sleep },
553	{ "ddp_mtx", &lock_class_mtx_sleep },
554	{ NULL, NULL },
555	/*
556	 * BPF
557	 */
558	{ "bpf global lock", &lock_class_mtx_sleep },
559	{ "bpf interface lock", &lock_class_rw },
560	{ "bpf cdev lock", &lock_class_mtx_sleep },
561	{ NULL, NULL },
562	/*
563	 * NFS server
564	 */
565	{ "nfsd_mtx", &lock_class_mtx_sleep },
566	{ "so_snd", &lock_class_mtx_sleep },
567	{ NULL, NULL },
568
569	/*
570	 * IEEE 802.11
571	 */
572	{ "802.11 com lock", &lock_class_mtx_sleep},
573	{ NULL, NULL },
574	/*
575	 * Network drivers
576	 */
577	{ "network driver", &lock_class_mtx_sleep},
578	{ NULL, NULL },
579
580	/*
581	 * Netgraph
582	 */
583	{ "ng_node", &lock_class_mtx_sleep },
584	{ "ng_worklist", &lock_class_mtx_sleep },
585	{ NULL, NULL },
586	/*
587	 * CDEV
588	 */
589	{ "vm map (system)", &lock_class_mtx_sleep },
590	{ "vm page queue", &lock_class_mtx_sleep },
591	{ "vnode interlock", &lock_class_mtx_sleep },
592	{ "cdev", &lock_class_mtx_sleep },
593	{ NULL, NULL },
594	/*
595	 * VM
596	 */
597	{ "vm map (user)", &lock_class_sx },
598	{ "vm object", &lock_class_mtx_sleep },
599	{ "vm page", &lock_class_mtx_sleep },
600	{ "vm page queue", &lock_class_mtx_sleep },
601	{ "pmap pv global", &lock_class_rw },
602	{ "pmap", &lock_class_mtx_sleep },
603	{ "pmap pv list", &lock_class_rw },
604	{ "vm page free queue", &lock_class_mtx_sleep },
605	{ NULL, NULL },
606	/*
607	 * kqueue/VFS interaction
608	 */
609	{ "kqueue", &lock_class_mtx_sleep },
610	{ "struct mount mtx", &lock_class_mtx_sleep },
611	{ "vnode interlock", &lock_class_mtx_sleep },
612	{ NULL, NULL },
613	/*
614	 * ZFS locking
615	 */
616	{ "dn->dn_mtx", &lock_class_sx },
617	{ "dr->dt.di.dr_mtx", &lock_class_sx },
618	{ "db->db_mtx", &lock_class_sx },
619	{ NULL, NULL },
620	/*
621	 * spin locks
622	 */
623#ifdef SMP
624	{ "ap boot", &lock_class_mtx_spin },
625#endif
626	{ "rm.mutex_mtx", &lock_class_mtx_spin },
627	{ "sio", &lock_class_mtx_spin },
628	{ "scrlock", &lock_class_mtx_spin },
629#ifdef __i386__
630	{ "cy", &lock_class_mtx_spin },
631#endif
632#ifdef __sparc64__
633	{ "pcib_mtx", &lock_class_mtx_spin },
634	{ "rtc_mtx", &lock_class_mtx_spin },
635#endif
636	{ "scc_hwmtx", &lock_class_mtx_spin },
637	{ "uart_hwmtx", &lock_class_mtx_spin },
638	{ "fast_taskqueue", &lock_class_mtx_spin },
639	{ "intr table", &lock_class_mtx_spin },
640#ifdef	HWPMC_HOOKS
641	{ "pmc-per-proc", &lock_class_mtx_spin },
642#endif
643	{ "process slock", &lock_class_mtx_spin },
644	{ "sleepq chain", &lock_class_mtx_spin },
645	{ "umtx lock", &lock_class_mtx_spin },
646	{ "rm_spinlock", &lock_class_mtx_spin },
647	{ "turnstile chain", &lock_class_mtx_spin },
648	{ "turnstile lock", &lock_class_mtx_spin },
649	{ "sched lock", &lock_class_mtx_spin },
650	{ "td_contested", &lock_class_mtx_spin },
651	{ "callout", &lock_class_mtx_spin },
652	{ "entropy harvest mutex", &lock_class_mtx_spin },
653	{ "syscons video lock", &lock_class_mtx_spin },
654#ifdef SMP
655	{ "smp rendezvous", &lock_class_mtx_spin },
656#endif
657#ifdef __powerpc__
658	{ "tlb0", &lock_class_mtx_spin },
659#endif
660	/*
661	 * leaf locks
662	 */
663	{ "intrcnt", &lock_class_mtx_spin },
664	{ "icu", &lock_class_mtx_spin },
665#ifdef __i386__
666	{ "allpmaps", &lock_class_mtx_spin },
667	{ "descriptor tables", &lock_class_mtx_spin },
668#endif
669	{ "clk", &lock_class_mtx_spin },
670	{ "cpuset", &lock_class_mtx_spin },
671	{ "mprof lock", &lock_class_mtx_spin },
672	{ "zombie lock", &lock_class_mtx_spin },
673	{ "ALD Queue", &lock_class_mtx_spin },
674#ifdef __ia64__
675	{ "MCA spin lock", &lock_class_mtx_spin },
676#endif
677#if defined(__i386__) || defined(__amd64__)
678	{ "pcicfg", &lock_class_mtx_spin },
679	{ "NDIS thread lock", &lock_class_mtx_spin },
680#endif
681	{ "tw_osl_io_lock", &lock_class_mtx_spin },
682	{ "tw_osl_q_lock", &lock_class_mtx_spin },
683	{ "tw_cl_io_lock", &lock_class_mtx_spin },
684	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
685	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
686#ifdef	HWPMC_HOOKS
687	{ "pmc-leaf", &lock_class_mtx_spin },
688#endif
689	{ "blocked lock", &lock_class_mtx_spin },
690	{ NULL, NULL },
691	{ NULL, NULL }
692};
693
694#ifdef BLESSING
695/*
696 * Pairs of locks which have been blessed
697 * Don't complain about order problems with blessed locks
698 */
699static struct witness_blessed blessed_list[] = {
700};
701static int blessed_count =
702	sizeof(blessed_list) / sizeof(struct witness_blessed);
703#endif
704
705/*
706 * This global is set to 0 once it becomes safe to use the witness code.
707 */
708static int witness_cold = 1;
709
710/*
711 * This global is set to 1 once the static lock orders have been enrolled
712 * so that a warning can be issued for any spin locks enrolled later.
713 */
714static int witness_spin_warn = 0;
715
716/* Trim useless garbage from filenames. */
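/*
 * For example, a path such as "../../../kern/kern_mutex.c" (as produced by
 * __FILE__ in some build configurations) is reported as "kern/kern_mutex.c".
 */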
717static const char *
718fixup_filename(const char *file)
719{
720
721	if (file == NULL)
722		return (NULL);
723	while (strncmp(file, "../", 3) == 0)
724		file += 3;
725	return (file);
726}
727
/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code
 * assumes that early boot is single-threaded, at least until after this
 * routine has completed.
 */
733static void
734witness_initialize(void *dummy __unused)
735{
736	struct lock_object *lock;
737	struct witness_order_list_entry *order;
738	struct witness *w, *w1;
739	int i;
740
741	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
742	    M_NOWAIT | M_ZERO);
743
744	/*
745	 * We have to release Giant before initializing its witness
746	 * structure so that WITNESS doesn't get confused.
747	 */
748	mtx_unlock(&Giant);
749	mtx_assert(&Giant, MA_NOTOWNED);
750
751	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
752	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
753	    MTX_NOWITNESS | MTX_NOPROFILE);
754	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
755		w = &w_data[i];
756		memset(w, 0, sizeof(*w));
757		w_data[i].w_index = i;	/* Witness index never changes. */
758		witness_free(w);
759	}
760	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
761	    ("%s: Invalid list of free witness objects", __func__));
762
	/* The witness at index 0 is deliberately left unused, to aid debugging. */
764	STAILQ_REMOVE_HEAD(&w_free, w_list);
765	w_free_cnt--;
766
767	memset(w_rmatrix, 0,
768	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));
769
770	for (i = 0; i < LOCK_CHILDCOUNT; i++)
771		witness_lock_list_free(&w_locklistdata[i]);
772	witness_init_hash_tables();
773
774	/* First add in all the specified order lists. */
775	for (order = order_lists; order->w_name != NULL; order++) {
776		w = enroll(order->w_name, order->w_class);
777		if (w == NULL)
778			continue;
779		w->w_file = "order list";
780		for (order++; order->w_name != NULL; order++) {
781			w1 = enroll(order->w_name, order->w_class);
782			if (w1 == NULL)
783				continue;
784			w1->w_file = "order list";
785			itismychild(w, w1);
786			w = w1;
787		}
788	}
789	witness_spin_warn = 1;
790
791	/* Iterate through all locks and add them to witness. */
792	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
793		lock = pending_locks[i].wh_lock;
794		KASSERT(lock->lo_flags & LO_WITNESS,
795		    ("%s: lock %s is on pending list but not LO_WITNESS",
796		    __func__, lock->lo_name));
797		lock->lo_witness = enroll(pending_locks[i].wh_type,
798		    LOCK_CLASS(lock));
799	}
800
801	/* Mark the witness code as being ready for use. */
802	witness_cold = 0;
803
804	mtx_lock(&Giant);
805}
806SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
807    NULL);
808
809void
810witness_init(struct lock_object *lock, const char *type)
811{
812	struct lock_class *class;
813
814	/* Various sanity checks. */
815	class = LOCK_CLASS(lock);
816	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
817	    (class->lc_flags & LC_RECURSABLE) == 0)
818		panic("%s: lock (%s) %s can not be recursable", __func__,
819		    class->lc_name, lock->lo_name);
820	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
821	    (class->lc_flags & LC_SLEEPABLE) == 0)
822		panic("%s: lock (%s) %s can not be sleepable", __func__,
823		    class->lc_name, lock->lo_name);
824	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
825	    (class->lc_flags & LC_UPGRADABLE) == 0)
826		panic("%s: lock (%s) %s can not be upgradable", __func__,
827		    class->lc_name, lock->lo_name);
828
829	/*
830	 * If we shouldn't watch this lock, then just clear lo_witness.
831	 * Otherwise, if witness_cold is set, then it is too early to
832	 * enroll this lock, so defer it to witness_initialize() by adding
833	 * it to the pending_locks list.  If it is not too early, then enroll
834	 * the lock now.
835	 */
836	if (witness_watch < 1 || panicstr != NULL ||
837	    (lock->lo_flags & LO_WITNESS) == 0)
838		lock->lo_witness = NULL;
	else if (witness_cold) {
		/* Check first so that we never write past the end of the array. */
		if (pending_cnt >= WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, bump it\n",
			    __func__);
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
845	} else
846		lock->lo_witness = enroll(type, class);
847}
848
849void
850witness_destroy(struct lock_object *lock)
851{
852	struct lock_class *class;
853	struct witness *w;
854
855	class = LOCK_CLASS(lock);
856
857	if (witness_cold)
858		panic("lock (%s) %s destroyed while witness_cold",
859		    class->lc_name, lock->lo_name);
860
861	/* XXX: need to verify that no one holds the lock */
862	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
863		return;
864	w = lock->lo_witness;
865
866	mtx_lock_spin(&w_mtx);
867	MPASS(w->w_refcount > 0);
868	w->w_refcount--;
869
870	if (w->w_refcount == 0)
871		depart(w);
872	mtx_unlock_spin(&w_mtx);
873}
874
875#ifdef DDB
876static void
877witness_ddb_compute_levels(void)
878{
879	struct witness *w;
880
881	/*
882	 * First clear all levels.
883	 */
884	STAILQ_FOREACH(w, &w_all, w_list)
885		w->w_ddb_level = -1;
886
887	/*
888	 * Look for locks with no parents and level all their descendants.
889	 */
890	STAILQ_FOREACH(w, &w_all, w_list) {
891
892		/* If the witness has ancestors (is not a root), skip it. */
893		if (w->w_num_ancestors > 0)
894			continue;
895		witness_ddb_level_descendants(w, 0);
896	}
897}
898
899static void
900witness_ddb_level_descendants(struct witness *w, int l)
901{
902	int i;
903
904	if (w->w_ddb_level >= l)
905		return;
906
907	w->w_ddb_level = l;
908	l++;
909
910	for (i = 1; i <= w_max_used_index; i++) {
911		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
912			witness_ddb_level_descendants(&w_data[i], l);
913	}
914}
915
916static void
917witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
918    struct witness *w, int indent)
919{
920	int i;
921
922 	for (i = 0; i < indent; i++)
923 		prnt(" ");
924	prnt("%s (type: %s, depth: %d, active refs: %d)",
925	     w->w_name, w->w_class->lc_name,
926	     w->w_ddb_level, w->w_refcount);
927 	if (w->w_displayed) {
928 		prnt(" -- (already displayed)\n");
929 		return;
930 	}
931 	w->w_displayed = 1;
932	if (w->w_file != NULL && w->w_line != 0)
933		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
934		    w->w_line);
935	else
936		prnt(" -- never acquired\n");
937	indent++;
938	WITNESS_INDEX_ASSERT(w->w_index);
939	for (i = 1; i <= w_max_used_index; i++) {
940		if (db_pager_quit)
941			return;
942		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
943			witness_ddb_display_descendants(prnt, &w_data[i],
944			    indent);
945	}
946}
947
948static void
949witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
950    struct witness_list *list)
951{
952	struct witness *w;
953
954	STAILQ_FOREACH(w, list, w_typelist) {
955		if (w->w_file == NULL || w->w_ddb_level > 0)
956			continue;
957
		/* This lock has no ancestors - display its descendants. */
959		witness_ddb_display_descendants(prnt, w, 0);
960		if (db_pager_quit)
961			return;
962	}
963}
964
965static void
966witness_ddb_display(int(*prnt)(const char *fmt, ...))
967{
968	struct witness *w;
969
970	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
971	witness_ddb_compute_levels();
972
973	/* Clear all the displayed flags. */
974	STAILQ_FOREACH(w, &w_all, w_list)
975		w->w_displayed = 0;
976
977	/*
978	 * First, handle sleep locks which have been acquired at least
979	 * once.
980	 */
981	prnt("Sleep locks:\n");
982	witness_ddb_display_list(prnt, &w_sleep);
983	if (db_pager_quit)
984		return;
985
986	/*
987	 * Now do spin locks which have been acquired at least once.
988	 */
989	prnt("\nSpin locks:\n");
990	witness_ddb_display_list(prnt, &w_spin);
991	if (db_pager_quit)
992		return;
993
994	/*
995	 * Finally, any locks which have not been acquired yet.
996	 */
997	prnt("\nLocks which were never acquired:\n");
998	STAILQ_FOREACH(w, &w_all, w_list) {
999		if (w->w_file != NULL || w->w_refcount == 0)
1000			continue;
1001		prnt("%s (type: %s, depth: %d)\n", w->w_name,
1002		    w->w_class->lc_name, w->w_ddb_level);
1003		if (db_pager_quit)
1004			return;
1005	}
1006}
1007#endif /* DDB */
1008
1009int
1010witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1011{
1012
1013	if (witness_watch == -1 || panicstr != NULL)
1014		return (0);
1015
1016	/* Require locks that witness knows about. */
1017	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1018	    lock2->lo_witness == NULL)
1019		return (EINVAL);
1020
1021	mtx_assert(&w_mtx, MA_NOTOWNED);
1022	mtx_lock_spin(&w_mtx);
1023
1024	/*
1025	 * If we already have either an explicit or implied lock order that
1026	 * is the other way around, then return an error.
1027	 */
1028	if (witness_watch &&
1029	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1030		mtx_unlock_spin(&w_mtx);
1031		return (EDOOFUS);
1032	}
1033
1034	/* Try to add the new order. */
1035	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1036	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1037	itismychild(lock1->lo_witness, lock2->lo_witness);
1038	mtx_unlock_spin(&w_mtx);
1039	return (0);
1040}
1041
1042void
1043witness_checkorder(struct lock_object *lock, int flags, const char *file,
1044    int line, struct lock_object *interlock)
1045{
1046	struct lock_list_entry *lock_list, *lle;
1047	struct lock_instance *lock1, *lock2, *plock;
1048	struct lock_class *class;
1049	struct witness *w, *w1;
1050	struct thread *td;
1051	int i, j;
1052
1053	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1054	    panicstr != NULL)
1055		return;
1056
1057	w = lock->lo_witness;
1058	class = LOCK_CLASS(lock);
1059	td = curthread;
1060
1061	if (class->lc_flags & LC_SLEEPLOCK) {
1062
1063		/*
1064		 * Since spin locks include a critical section, this check
1065		 * implicitly enforces a lock order of all sleep locks before
1066		 * all spin locks.
1067		 */
1068		if (td->td_critnest != 0 && !kdb_active)
1069			panic("blockable sleep lock (%s) %s @ %s:%d",
1070			    class->lc_name, lock->lo_name,
1071			    fixup_filename(file), line);
1072
1073		/*
1074		 * If this is the first lock acquired then just return as
1075		 * no order checking is needed.
1076		 */
1077		lock_list = td->td_sleeplocks;
1078		if (lock_list == NULL || lock_list->ll_count == 0)
1079			return;
1080	} else {
1081
		/*
		 * If this is the first lock, just return, as no order
		 * checking is needed.  Pin the thread to avoid problems
		 * with migration while checking whether any spinlocks
		 * are held.  If at least one spinlock is held, the
		 * thread is already on a safe path, so it may be
		 * unpinned.
		 */
1090		sched_pin();
1091		lock_list = PCPU_GET(spinlocks);
1092		if (lock_list == NULL || lock_list->ll_count == 0) {
1093			sched_unpin();
1094			return;
1095		}
1096		sched_unpin();
1097	}
1098
1099	/*
1100	 * Check to see if we are recursing on a lock we already own.  If
1101	 * so, make sure that we don't mismatch exclusive and shared lock
1102	 * acquires.
1103	 */
1104	lock1 = find_instance(lock_list, lock);
1105	if (lock1 != NULL) {
1106		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1107		    (flags & LOP_EXCLUSIVE) == 0) {
1108			printf("shared lock of (%s) %s @ %s:%d\n",
1109			    class->lc_name, lock->lo_name,
1110			    fixup_filename(file), line);
1111			printf("while exclusively locked from %s:%d\n",
1112			    fixup_filename(lock1->li_file), lock1->li_line);
1113			panic("share->excl");
1114		}
1115		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1116		    (flags & LOP_EXCLUSIVE) != 0) {
1117			printf("exclusive lock of (%s) %s @ %s:%d\n",
1118			    class->lc_name, lock->lo_name,
1119			    fixup_filename(file), line);
1120			printf("while share locked from %s:%d\n",
1121			    fixup_filename(lock1->li_file), lock1->li_line);
1122			panic("excl->share");
1123		}
1124		return;
1125	}
1126
1127	/*
1128	 * Find the previously acquired lock, but ignore interlocks.
1129	 */
1130	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1131	if (interlock != NULL && plock->li_lock == interlock) {
1132		if (lock_list->ll_count > 1)
1133			plock =
1134			    &lock_list->ll_children[lock_list->ll_count - 2];
1135		else {
1136			lle = lock_list->ll_next;
1137
1138			/*
1139			 * The interlock is the only lock we hold, so
1140			 * simply return.
1141			 */
1142			if (lle == NULL)
1143				return;
1144			plock = &lle->ll_children[lle->ll_count - 1];
1145		}
1146	}
1147
1148	/*
1149	 * Try to perform most checks without a lock.  If this succeeds we
1150	 * can skip acquiring the lock and return success.
1151	 */
1152	w1 = plock->li_lock->lo_witness;
1153	if (witness_lock_order_check(w1, w))
1154		return;
1155
	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this against the most recently acquired lock.
	 * Any other cases will be caught as lock order violations.
	 */
1161	mtx_lock_spin(&w_mtx);
1162	witness_lock_order_add(w1, w);
1163	if (w1 == w) {
1164		i = w->w_index;
1165		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1166		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
1168			w->w_reversed = 1;
1169			mtx_unlock_spin(&w_mtx);
1170			printf(
1171			    "acquiring duplicate lock of same type: \"%s\"\n",
1172			    w->w_name);
1173			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1174			    fixup_filename(plock->li_file), plock->li_line);
1175			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
1176			    fixup_filename(file), line);
1177			witness_debugger(1);
1178		} else
1179			mtx_unlock_spin(&w_mtx);
1180		return;
1181	}
1182	mtx_assert(&w_mtx, MA_OWNED);
1183
1184	/*
1185	 * If we know that the lock we are acquiring comes after
1186	 * the lock we most recently acquired in the lock order tree,
1187	 * then there is no need for any further checks.
1188	 */
1189	if (isitmychild(w1, w))
1190		goto out;
1191
1192	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1193		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1194
1195			MPASS(j < WITNESS_COUNT);
1196			lock1 = &lle->ll_children[i];
1197
1198			/*
1199			 * Ignore the interlock the first time we see it.
1200			 */
1201			if (interlock != NULL && interlock == lock1->li_lock) {
1202				interlock = NULL;
1203				continue;
1204			}
1205
1206			/*
1207			 * If this lock doesn't undergo witness checking,
1208			 * then skip it.
1209			 */
1210			w1 = lock1->li_lock->lo_witness;
1211			if (w1 == NULL) {
1212				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1213				    ("lock missing witness structure"));
1214				continue;
1215			}
1216
1217			/*
1218			 * If we are locking Giant and this is a sleepable
1219			 * lock, then skip it.
1220			 */
1221			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1222			    lock == &Giant.lock_object)
1223				continue;
1224
1225			/*
1226			 * If we are locking a sleepable lock and this lock
1227			 * is Giant, then skip it.
1228			 */
1229			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1230			    lock1->li_lock == &Giant.lock_object)
1231				continue;
1232
1233			/*
1234			 * If we are locking a sleepable lock and this lock
1235			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
1237			 * sleepable locks before non-sleepable locks.
1238			 */
1239			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1240			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1241				goto reversal;
1242
1243			/*
1244			 * If we are locking Giant and this is a non-sleepable
1245			 * lock, then treat it as a reversal.
1246			 */
1247			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1248			    lock == &Giant.lock_object)
1249				goto reversal;
1250
1251			/*
			 * Check the lock order hierarchy for a reversal.
1253			 */
1254			if (!isitmydescendant(w, w1))
1255				continue;
1256		reversal:
1257
1258			/*
1259			 * We have a lock order violation, check to see if it
1260			 * is allowed or has already been yelled about.
1261			 */
1262#ifdef BLESSING
1263
1264			/*
1265			 * If the lock order is blessed, just bail.  We don't
1266			 * look for other lock order violations though, which
1267			 * may be a bug.
1268			 */
1269			if (blessed(w, w1))
1270				goto out;
1271#endif
1272
1273			/* Bail if this violation is known */
1274			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1275				goto out;
1276
1277			/* Record this as a violation */
1278			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1279			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1280			w->w_reversed = w1->w_reversed = 1;
1281			witness_increment_graph_generation();
1282			mtx_unlock_spin(&w_mtx);
1283
1284			/*
1285			 * Ok, yell about it.
1286			 */
1287			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1288			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1289				printf(
1290		"lock order reversal: (sleepable after non-sleepable)\n");
1291			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1292			    && lock == &Giant.lock_object)
1293				printf(
1294		"lock order reversal: (Giant after non-sleepable)\n");
1295			else
1296				printf("lock order reversal:\n");
1297
1298			/*
1299			 * Try to locate an earlier lock with
1300			 * witness w in our list.
1301			 */
1302			do {
1303				lock2 = &lle->ll_children[i];
1304				MPASS(lock2->li_lock != NULL);
1305				if (lock2->li_lock->lo_witness == w)
1306					break;
1307				if (i == 0 && lle->ll_next != NULL) {
1308					lle = lle->ll_next;
1309					i = lle->ll_count - 1;
1310					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1311				} else
1312					i--;
1313			} while (i >= 0);
1314			if (i < 0) {
1315				printf(" 1st %p %s (%s) @ %s:%d\n",
1316				    lock1->li_lock, lock1->li_lock->lo_name,
1317				    w1->w_name, fixup_filename(lock1->li_file),
1318				    lock1->li_line);
1319				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1320				    lock->lo_name, w->w_name,
1321				    fixup_filename(file), line);
1322			} else {
1323				printf(" 1st %p %s (%s) @ %s:%d\n",
1324				    lock2->li_lock, lock2->li_lock->lo_name,
1325				    lock2->li_lock->lo_witness->w_name,
1326				    fixup_filename(lock2->li_file),
1327				    lock2->li_line);
1328				printf(" 2nd %p %s (%s) @ %s:%d\n",
1329				    lock1->li_lock, lock1->li_lock->lo_name,
1330				    w1->w_name, fixup_filename(lock1->li_file),
1331				    lock1->li_line);
1332				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1333				    lock->lo_name, w->w_name,
1334				    fixup_filename(file), line);
1335			}
1336			witness_debugger(1);
1337			return;
1338		}
1339	}
1340
1341	/*
1342	 * If requested, build a new lock order.  However, don't build a new
1343	 * relationship between a sleepable lock and Giant if it is in the
1344	 * wrong direction.  The correct lock order is that sleepable locks
1345	 * always come before Giant.
1346	 */
1347	if (flags & LOP_NEWORDER &&
1348	    !(plock->li_lock == &Giant.lock_object &&
1349	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1350		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1351		    w->w_name, plock->li_lock->lo_witness->w_name);
1352		itismychild(plock->li_lock->lo_witness, w);
1353	}
1354out:
1355	mtx_unlock_spin(&w_mtx);
1356}
1357
1358void
1359witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1360{
1361	struct lock_list_entry **lock_list, *lle;
1362	struct lock_instance *instance;
1363	struct witness *w;
1364	struct thread *td;
1365
1366	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1367	    panicstr != NULL)
1368		return;
1369	w = lock->lo_witness;
1370	td = curthread;
1371
1372	/* Determine lock list for this lock. */
1373	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1374		lock_list = &td->td_sleeplocks;
1375	else
1376		lock_list = PCPU_PTR(spinlocks);
1377
1378	/* Check to see if we are recursing on a lock we already own. */
1379	instance = find_instance(*lock_list, lock);
1380	if (instance != NULL) {
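		/*
		 * The recursion count is kept in the low LI_RECURSEMASK
		 * bits of li_flags, so incrementing li_flags bumps the
		 * count by one.
		 */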
1381		instance->li_flags++;
1382		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1383		    td->td_proc->p_pid, lock->lo_name,
1384		    instance->li_flags & LI_RECURSEMASK);
1385		instance->li_file = file;
1386		instance->li_line = line;
1387		return;
1388	}
1389
	/* Update the per-witness record of where the lock was last acquired. */
1391	w->w_file = file;
1392	w->w_line = line;
1393
1394	/* Find the next open lock instance in the list and fill it. */
1395	lle = *lock_list;
1396	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1397		lle = witness_lock_list_get();
1398		if (lle == NULL)
1399			return;
1400		lle->ll_next = *lock_list;
1401		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1402		    td->td_proc->p_pid, lle);
1403		*lock_list = lle;
1404	}
1405	instance = &lle->ll_children[lle->ll_count++];
1406	instance->li_lock = lock;
1407	instance->li_line = line;
1408	instance->li_file = file;
1409	if ((flags & LOP_EXCLUSIVE) != 0)
1410		instance->li_flags = LI_EXCLUSIVE;
1411	else
1412		instance->li_flags = 0;
1413	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1414	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1415}
1416
1417void
1418witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1419{
1420	struct lock_instance *instance;
1421	struct lock_class *class;
1422
1423	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1424	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1425		return;
1426	class = LOCK_CLASS(lock);
1427	if (witness_watch) {
1428		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1429			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1430			    class->lc_name, lock->lo_name,
1431			    fixup_filename(file), line);
1432		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1433			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1434			    class->lc_name, lock->lo_name,
1435			    fixup_filename(file), line);
1436	}
1437	instance = find_instance(curthread->td_sleeplocks, lock);
1438	if (instance == NULL)
1439		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1440		    class->lc_name, lock->lo_name,
1441		    fixup_filename(file), line);
1442	if (witness_watch) {
1443		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1444			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1445			    class->lc_name, lock->lo_name,
1446			    fixup_filename(file), line);
1447		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1448			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1449			    class->lc_name, lock->lo_name,
1450			    instance->li_flags & LI_RECURSEMASK,
1451			    fixup_filename(file), line);
1452	}
1453	instance->li_flags |= LI_EXCLUSIVE;
1454}
1455
1456void
1457witness_downgrade(struct lock_object *lock, int flags, const char *file,
1458    int line)
1459{
1460	struct lock_instance *instance;
1461	struct lock_class *class;
1462
1463	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1464	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1465		return;
1466	class = LOCK_CLASS(lock);
1467	if (witness_watch) {
1468		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1470			    class->lc_name, lock->lo_name,
1471			    fixup_filename(file), line);
1472		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1473			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1474			    class->lc_name, lock->lo_name,
1475			    fixup_filename(file), line);
1476	}
1477	instance = find_instance(curthread->td_sleeplocks, lock);
1478	if (instance == NULL)
1479		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1480		    class->lc_name, lock->lo_name,
1481		    fixup_filename(file), line);
1482	if (witness_watch) {
1483		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1484			panic("downgrade of shared lock (%s) %s @ %s:%d",
1485			    class->lc_name, lock->lo_name,
1486			    fixup_filename(file), line);
1487		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1488			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1489			    class->lc_name, lock->lo_name,
1490			    instance->li_flags & LI_RECURSEMASK,
1491			    fixup_filename(file), line);
1492	}
1493	instance->li_flags &= ~LI_EXCLUSIVE;
1494}
1495
1496void
1497witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1498{
1499	struct lock_list_entry **lock_list, *lle;
1500	struct lock_instance *instance;
1501	struct lock_class *class;
1502	struct thread *td;
1503	register_t s;
1504	int i, j;
1505
1506	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1507		return;
1508	td = curthread;
1509	class = LOCK_CLASS(lock);
1510
1511	/* Find lock instance associated with this lock. */
1512	if (class->lc_flags & LC_SLEEPLOCK)
1513		lock_list = &td->td_sleeplocks;
1514	else
1515		lock_list = PCPU_PTR(spinlocks);
1516	lle = *lock_list;
1517	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1518		for (i = 0; i < (*lock_list)->ll_count; i++) {
1519			instance = &(*lock_list)->ll_children[i];
1520			if (instance->li_lock == lock)
1521				goto found;
1522		}
1523
	/*
	 * When WITNESS is disabled at runtime via witness_watch, locks
	 * registered earlier may still be sitting in the td_sleeplocks
	 * queue.  We have to make sure those queues get flushed, so we
	 * still search for any such leftover registered locks and remove
	 * them.
	 */
1530	if (witness_watch > 0)
1531		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1532		    lock->lo_name, fixup_filename(file), line);
1533	else
1534		return;
1535found:
1536
1537	/* First, check for shared/exclusive mismatches. */
1538	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1539	    (flags & LOP_EXCLUSIVE) == 0) {
1540		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1541		    lock->lo_name, fixup_filename(file), line);
1542		printf("while exclusively locked from %s:%d\n",
1543		    fixup_filename(instance->li_file), instance->li_line);
1544		panic("excl->ushare");
1545	}
1546	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1547	    (flags & LOP_EXCLUSIVE) != 0) {
1548		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1549		    lock->lo_name, fixup_filename(file), line);
1550		printf("while share locked from %s:%d\n",
1551		    fixup_filename(instance->li_file),
1552		    instance->li_line);
1553		panic("share->uexcl");
1554	}
1555	/* If we are recursed, unrecurse. */
1556	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1557		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1558		    td->td_proc->p_pid, instance->li_lock->lo_name,
1559		    instance->li_flags);
1560		instance->li_flags--;
1561		return;
1562	}
	/* The lock is now being dropped; check for the NORELEASE flag. */
1564	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1565		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1566		    lock->lo_name, fixup_filename(file), line);
1567		panic("lock marked norelease");
1568	}
1569
1570	/* Otherwise, remove this item from the list. */
1571	s = intr_disable();
1572	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1573	    td->td_proc->p_pid, instance->li_lock->lo_name,
1574	    (*lock_list)->ll_count - 1);
1575	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1576		(*lock_list)->ll_children[j] =
1577		    (*lock_list)->ll_children[j + 1];
1578	(*lock_list)->ll_count--;
1579	intr_restore(s);
1580
	/*
	 * To reduce contention on w_mtx, we always try to keep a head
	 * entry in each list so that frequent allocation from the free
	 * lock list entry pool (and the locking that entails) is avoided.
	 * To keep the code simple, an entry that becomes empty is freed
	 * unless it is the only entry in the list; when the head itself
	 * is freed, ownership of the list is handed over to the next
	 * entry.
	 */
1590	if ((*lock_list)->ll_count == 0) {
1591		if (*lock_list == lle) {
1592			if (lle->ll_next == NULL)
1593				return;
1594		} else
1595			lle = *lock_list;
1596		*lock_list = lle->ll_next;
1597		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1598		    td->td_proc->p_pid, lle);
1599		witness_lock_list_free(lle);
1600	}
1601}
1602
1603void
1604witness_thread_exit(struct thread *td)
1605{
1606	struct lock_list_entry *lle;
1607	int i, n;
1608
1609	lle = td->td_sleeplocks;
1610	if (lle == NULL || panicstr != NULL)
1611		return;
1612	if (lle->ll_count != 0) {
1613		for (n = 0; lle != NULL; lle = lle->ll_next)
1614			for (i = lle->ll_count - 1; i >= 0; i--) {
1615				if (n == 0)
1616		printf("Thread %p exiting with the following locks held:\n",
1617					    td);
1618				n++;
1619				witness_list_lock(&lle->ll_children[i], printf);
1620
1621			}
1622		panic("Thread %p cannot exit while holding sleeplocks\n", td);
1623	}
1624	witness_lock_list_free(lle);
1625}
1626
1627/*
1628 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1629 * exempt Giant and sleepable locks from the checks as well.  If any
1630 * non-exempt locks are held, then a supplied message is printed to the
1631 * console along with a list of the offending locks.  If indicated in the
1632 * flags then a failure results in a panic as well.
1633 */
1634int
1635witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1636{
1637	struct lock_list_entry *lock_list, *lle;
1638	struct lock_instance *lock1;
1639	struct thread *td;
1640	va_list ap;
1641	int i, n;
1642
1643	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1644		return (0);
1645	n = 0;
1646	td = curthread;
1647	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1648		for (i = lle->ll_count - 1; i >= 0; i--) {
1649			lock1 = &lle->ll_children[i];
1650			if (lock1->li_lock == lock)
1651				continue;
1652			if (flags & WARN_GIANTOK &&
1653			    lock1->li_lock == &Giant.lock_object)
1654				continue;
1655			if (flags & WARN_SLEEPOK &&
1656			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1657				continue;
1658			if (n == 0) {
1659				va_start(ap, fmt);
1660				vprintf(fmt, ap);
1661				va_end(ap);
1662				printf(" with the following");
1663				if (flags & WARN_SLEEPOK)
1664					printf(" non-sleepable");
1665				printf(" locks held:\n");
1666			}
1667			n++;
1668			witness_list_lock(lock1, printf);
1669		}
1670
	/*
	 * Pin the thread in order to avoid problems with thread migration.
	 * Once all the checks of spinlock ownership have passed, the
	 * thread is on a safe path and can be unpinned.
	 */
1676	sched_pin();
1677	lock_list = PCPU_GET(spinlocks);
1678	if (lock_list != NULL && lock_list->ll_count != 0) {
1679		sched_unpin();
1680
		/*
		 * We should only have one spinlock and, since the
		 * exemption flags cannot apply to this lock class,
		 * check whether the first spinlock is the one that
		 * curthread should hold.
		 */
1687		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1688		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1689		    lock1->li_lock == lock && n == 0)
1690			return (0);
1691
1692		va_start(ap, fmt);
1693		vprintf(fmt, ap);
1694		va_end(ap);
1695		printf(" with the following");
1696		if (flags & WARN_SLEEPOK)
1697			printf(" non-sleepable");
1698		printf(" locks held:\n");
1699		n += witness_list_locks(&lock_list, printf);
1700	} else
1701		sched_unpin();
1702	if (flags & WARN_PANIC && n)
1703		panic("%s", __func__);
1704	else
1705		witness_debugger(n);
1706	return (n);
1707}
1708
1709const char *
1710witness_file(struct lock_object *lock)
1711{
1712	struct witness *w;
1713
1714	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1715		return ("?");
1716	w = lock->lo_witness;
1717	return (w->w_file);
1718}
1719
1720int
1721witness_line(struct lock_object *lock)
1722{
1723	struct witness *w;
1724
1725	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1726		return (0);
1727	w = lock->lo_witness;
1728	return (w->w_line);
1729}
1730
1731static struct witness *
1732enroll(const char *description, struct lock_class *lock_class)
1733{
1734	struct witness *w;
1735	struct witness_list *typelist;
1736
1737	MPASS(description != NULL);
1738
1739	if (witness_watch == -1 || panicstr != NULL)
1740		return (NULL);
1741	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1742		if (witness_skipspin)
1743			return (NULL);
1744		else
1745			typelist = &w_spin;
1746	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1747		typelist = &w_sleep;
1748	else
1749		panic("lock class %s is not sleep or spin",
1750		    lock_class->lc_name);
1751
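	/*
	 * Witnesses are keyed by name: locks initialized with the same
	 * description share a single witness, so an existing entry is
	 * simply reference counted below.
	 */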
1752	mtx_lock_spin(&w_mtx);
1753	w = witness_hash_get(description);
1754	if (w)
1755		goto found;
1756	if ((w = witness_get()) == NULL)
1757		return (NULL);
1758	MPASS(strlen(description) < MAX_W_NAME);
1759	strcpy(w->w_name, description);
1760	w->w_class = lock_class;
1761	w->w_refcount = 1;
1762	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1763	if (lock_class->lc_flags & LC_SPINLOCK) {
1764		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1765		w_spin_cnt++;
1766	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1767		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1768		w_sleep_cnt++;
1769	}
1770
1771	/* Insert new witness into the hash */
1772	witness_hash_put(w);
1773	witness_increment_graph_generation();
1774	mtx_unlock_spin(&w_mtx);
1775	return (w);
1776found:
1777	w->w_refcount++;
1778	mtx_unlock_spin(&w_mtx);
1779	if (lock_class != w->w_class)
1780		panic(
1781			"lock (%s) %s does not match earlier (%s) lock",
1782			description, lock_class->lc_name,
1783			w->w_class->lc_name);
1784	return (w);
1785}
1786
1787static void
1788depart(struct witness *w)
1789{
1790	struct witness_list *list;
1791
1792	MPASS(w->w_refcount == 0);
1793	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1794		list = &w_sleep;
1795		w_sleep_cnt--;
1796	} else {
1797		list = &w_spin;
1798		w_spin_cnt--;
1799	}
1800	/*
1801	 * Set file to NULL as it may point into a loadable module.
1802	 */
1803	w->w_file = NULL;
1804	w->w_line = 0;
1805	witness_increment_graph_generation();
1806}
1807
1809static void
1810adopt(struct witness *parent, struct witness *child)
1811{
1812	int pi, ci, i, j;
1813
1814	if (witness_cold == 0)
1815		mtx_assert(&w_mtx, MA_OWNED);
1816
1817	/* If the relationship is already known, there's no work to be done. */
1818	if (isitmychild(parent, child))
1819		return;
1820
1821	/* When the structure of the graph changes, bump up the generation. */
1822	witness_increment_graph_generation();
1823
1824	/*
1825	 * The hard part ... create the direct relationship, then propagate all
1826	 * indirect relationships.
1827	 */
1828	pi = parent->w_index;
1829	ci = child->w_index;
1830	WITNESS_INDEX_ASSERT(pi);
1831	WITNESS_INDEX_ASSERT(ci);
1832	MPASS(pi != ci);
1833	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1834	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1835
1836	/*
1837	 * If parent was not already an ancestor of child,
1838	 * then we increment the descendant and ancestor counters.
1839	 */
1840	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1841		parent->w_num_descendants++;
1842		child->w_num_ancestors++;
1843	}
1844
1845	/*
1846	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1847	 * an ancestor of 'pi' during this loop.
1848	 */
1849	for (i = 1; i <= w_max_used_index; i++) {
1850		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1851		    (i != pi))
1852			continue;
1853
1854		/* Find each descendant of 'i' and mark it as a descendant. */
1855		for (j = 1; j <= w_max_used_index; j++) {
1856
1857			/*
1858			 * Skip children that are already marked as
1859			 * descendants of 'i'.
1860			 */
1861			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1862				continue;
1863
1864			/*
1865			 * We are only interested in descendants of 'ci'. Note
1866			 * that 'ci' itself is counted as a descendant of 'ci'.
1867			 */
1868			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1869			    (j != ci))
1870				continue;
1871			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1872			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1873			w_data[i].w_num_descendants++;
1874			w_data[j].w_num_ancestors++;
1875
1876			/*
1877			 * Make sure we aren't marking a node as both an
1878			 * ancestor and descendant. We should have caught
1879			 * this as a lock order reversal earlier.
1880			 */
1881			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1882			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1883				printf("witness rmatrix paradox! [%d][%d]=%d "
1884				    "both ancestor and descendant\n",
1885				    i, j, w_rmatrix[i][j]);
1886				kdb_backtrace();
1887				printf("Witness disabled.\n");
1888				witness_watch = -1;
1889			}
1890			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1891			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1892				printf("witness rmatrix paradox! [%d][%d]=%d "
1893				    "both ancestor and descendant\n",
1894				    j, i, w_rmatrix[j][i]);
1895				kdb_backtrace();
1896				printf("Witness disabled.\n");
1897				witness_watch = -1;
1898			}
1899		}
1900	}
1901}
1902
1903static void
1904itismychild(struct witness *parent, struct witness *child)
1905{
1906
1907	MPASS(child != NULL && parent != NULL);
1908	if (witness_cold == 0)
1909		mtx_assert(&w_mtx, MA_OWNED);
1910
1911	if (!witness_lock_type_equal(parent, child)) {
1912		if (witness_cold == 0)
1913			mtx_unlock_spin(&w_mtx);
1914		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1915		    "the same lock type", __func__, parent->w_name,
1916		    parent->w_class->lc_name, child->w_name,
1917		    child->w_class->lc_name);
1918	}
1919	adopt(parent, child);
1920}
1921
1922/*
1923 * Generic code for the isitmy*() functions. The rmask parameter is the
1924 * expected relationship of w1 to w2.
1925 */
1926static int
1927_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1928{
1929	unsigned char r1, r2;
1930	int i1, i2;
1931
1932	i1 = w1->w_index;
1933	i2 = w2->w_index;
1934	WITNESS_INDEX_ASSERT(i1);
1935	WITNESS_INDEX_ASSERT(i2);
1936	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1937	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1938
1939	/* The flags on one better be the inverse of the flags on the other */
1940	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1941		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1942		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1943		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1944		    "w_rmatrix[%d][%d] == %hhx\n",
1945		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1946		    i2, i1, r2);
1947		kdb_backtrace();
1948		printf("Witness disabled.\n");
1949		witness_watch = -1;
1950	}
1951	return (r1 & rmask);
1952}
1953
1954/*
1955 * Checks if @child is a direct child of @parent.
1956 */
1957static int
1958isitmychild(struct witness *parent, struct witness *child)
1959{
1960
1961	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1962}
1963
1964/*
1965 * Checks if @descendant is a direct or indirect descendant of @ancestor.
1966 */
1967static int
1968isitmydescendant(struct witness *ancestor, struct witness *descendant)
1969{
1970
1971	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1972	    __func__));
1973}
1974
1975#ifdef BLESSING
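/*
 * Returns non-zero if the pair (w1, w2) appears, in either order, in the
 * static blessed_list, i.e. a lock order reversal between the two names
 * has been deliberately exempted from checking.
 */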
1976static int
1977blessed(struct witness *w1, struct witness *w2)
1978{
1979	int i;
1980	struct witness_blessed *b;
1981
1982	for (i = 0; i < blessed_count; i++) {
1983		b = &blessed_list[i];
1984		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1985			if (strcmp(w2->w_name, b->b_lock2) == 0)
1986				return (1);
1987			continue;
1988		}
1989		if (strcmp(w1->w_name, b->b_lock2) == 0)
1990			if (strcmp(w2->w_name, b->b_lock1) == 0)
1991				return (1);
1992	}
1993	return (0);
1994}
1995#endif
1996
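/*
 * Allocate a witness from the pre-allocated free pool.  If the pool is
 * exhausted, witness permanently disables itself; on any failure w_mtx
 * is dropped and NULL is returned.
 */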
1997static struct witness *
1998witness_get(void)
1999{
2000	struct witness *w;
2001	int index;
2002
2003	if (witness_cold == 0)
2004		mtx_assert(&w_mtx, MA_OWNED);
2005
2006	if (witness_watch == -1) {
2007		mtx_unlock_spin(&w_mtx);
2008		return (NULL);
2009	}
2010	if (STAILQ_EMPTY(&w_free)) {
2011		witness_watch = -1;
2012		mtx_unlock_spin(&w_mtx);
2013		printf("WITNESS: unable to allocate a new witness object\n");
2014		return (NULL);
2015	}
2016	w = STAILQ_FIRST(&w_free);
2017	STAILQ_REMOVE_HEAD(&w_free, w_list);
2018	w_free_cnt--;
2019	index = w->w_index;
2020	MPASS(index > 0 && index == w_max_used_index+1 &&
2021	    index < WITNESS_COUNT);
2022	bzero(w, sizeof(*w));
2023	w->w_index = index;
2024	if (index > w_max_used_index)
2025		w_max_used_index = index;
2026	return (w);
2027}
2028
2029static void
2030witness_free(struct witness *w)
2031{
2032
2033	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2034	w_free_cnt++;
2035}
2036
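/*
 * Lock list entries come from the statically allocated w_lock_list_free
 * pool.  If the pool is ever exhausted, witness permanently disables
 * itself rather than fail individual operations.
 */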
2037static struct lock_list_entry *
2038witness_lock_list_get(void)
2039{
2040	struct lock_list_entry *lle;
2041
2042	if (witness_watch == -1)
2043		return (NULL);
2044	mtx_lock_spin(&w_mtx);
2045	lle = w_lock_list_free;
2046	if (lle == NULL) {
2047		witness_watch = -1;
2048		mtx_unlock_spin(&w_mtx);
2049		printf("%s: witness exhausted\n", __func__);
2050		return (NULL);
2051	}
2052	w_lock_list_free = lle->ll_next;
2053	mtx_unlock_spin(&w_mtx);
2054	bzero(lle, sizeof(*lle));
2055	return (lle);
2056}
2057
2058static void
2059witness_lock_list_free(struct lock_list_entry *lle)
2060{
2061
2062	mtx_lock_spin(&w_mtx);
2063	lle->ll_next = w_lock_list_free;
2064	w_lock_list_free = lle;
2065	mtx_unlock_spin(&w_mtx);
2066}
2067
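/*
 * Search the given lock list for an instance of the specified lock,
 * scanning each list entry from the most recently acquired instance
 * backwards.  Returns NULL if the lock is not held.
 */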
2068static struct lock_instance *
2069find_instance(struct lock_list_entry *list, struct lock_object *lock)
2070{
2071	struct lock_list_entry *lle;
2072	struct lock_instance *instance;
2073	int i;
2074
2075	for (lle = list; lle != NULL; lle = lle->ll_next)
2076		for (i = lle->ll_count - 1; i >= 0; i--) {
2077			instance = &lle->ll_children[i];
2078			if (instance->li_lock == lock)
2079				return (instance);
2080		}
2081	return (NULL);
2082}
2083
2084static void
2085witness_list_lock(struct lock_instance *instance,
2086    int (*prnt)(const char *fmt, ...))
2087{
2088	struct lock_object *lock;
2089
2090	lock = instance->li_lock;
2091	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2092	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2093	if (lock->lo_witness->w_name != lock->lo_name)
2094		prnt(" (%s)", lock->lo_witness->w_name);
2095	prnt(" r = %d (%p) locked @ %s:%d\n",
2096	    instance->li_flags & LI_RECURSEMASK, lock,
2097	    fixup_filename(instance->li_file), instance->li_line);
2098}
2099
2100#ifdef DDB
2101static int
2102witness_thread_has_locks(struct thread *td)
2103{
2104
2105	if (td->td_sleeplocks == NULL)
2106		return (0);
2107	return (td->td_sleeplocks->ll_count != 0);
2108}
2109
2110static int
2111witness_proc_has_locks(struct proc *p)
2112{
2113	struct thread *td;
2114
2115	FOREACH_THREAD_IN_PROC(p, td) {
2116		if (witness_thread_has_locks(td))
2117			return (1);
2118	}
2119	return (0);
2120}
2121#endif
2122
2123int
2124witness_list_locks(struct lock_list_entry **lock_list,
2125    int (*prnt)(const char *fmt, ...))
2126{
2127	struct lock_list_entry *lle;
2128	int i, nheld;
2129
2130	nheld = 0;
2131	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2132		for (i = lle->ll_count - 1; i >= 0; i--) {
2133			witness_list_lock(&lle->ll_children[i], prnt);
2134			nheld++;
2135		}
2136	return (nheld);
2137}
2138
2139/*
2140 * This is a bit risky at best.  We call this function when we have timed
2141 * out acquiring a spin lock, and we assume that the other CPU is stuck
2142 * with this lock held.  So, we go groveling around in the other CPU's
2143 * per-cpu data to try to find the lock instance for this spin lock to
2144 * see when it was last acquired.
2145 */
2146void
2147witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2148    int (*prnt)(const char *fmt, ...))
2149{
2150	struct lock_instance *instance;
2151	struct pcpu *pc;
2152
2153	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2154		return;
2155	pc = pcpu_find(owner->td_oncpu);
2156	instance = find_instance(pc->pc_spinlocks, lock);
2157	if (instance != NULL)
2158		witness_list_lock(instance, prnt);
2159}
2160
2161void
2162witness_save(struct lock_object *lock, const char **filep, int *linep)
2163{
2164	struct lock_list_entry *lock_list;
2165	struct lock_instance *instance;
2166	struct lock_class *class;
2167
2168	/*
2169	 * This function is used independently in locking code to deal with
2170	 * Giant; the SCHEDULER_STOPPED() check can be removed here once Giant
2171	 * is gone.
2172	 */
2173	if (SCHEDULER_STOPPED())
2174		return;
2175	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2176	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2177		return;
2178	class = LOCK_CLASS(lock);
2179	if (class->lc_flags & LC_SLEEPLOCK)
2180		lock_list = curthread->td_sleeplocks;
2181	else {
2182		if (witness_skipspin)
2183			return;
2184		lock_list = PCPU_GET(spinlocks);
2185	}
2186	instance = find_instance(lock_list, lock);
2187	if (instance == NULL)
2188		panic("%s: lock (%s) %s not locked", __func__,
2189		    class->lc_name, lock->lo_name);
2190	*filep = instance->li_file;
2191	*linep = instance->li_line;
2192}
2193
2194void
2195witness_restore(struct lock_object *lock, const char *file, int line)
2196{
2197	struct lock_list_entry *lock_list;
2198	struct lock_instance *instance;
2199	struct lock_class *class;
2200
2201	/*
2202	 * This function is used independently in locking code to deal with
2203	 * Giant; the SCHEDULER_STOPPED() check can be removed here once Giant
2204	 * is gone.
2205	 */
2206	if (SCHEDULER_STOPPED())
2207		return;
2208	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2209	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2210		return;
2211	class = LOCK_CLASS(lock);
2212	if (class->lc_flags & LC_SLEEPLOCK)
2213		lock_list = curthread->td_sleeplocks;
2214	else {
2215		if (witness_skipspin)
2216			return;
2217		lock_list = PCPU_GET(spinlocks);
2218	}
2219	instance = find_instance(lock_list, lock);
2220	if (instance == NULL)
2221		panic("%s: lock (%s) %s not locked", __func__,
2222		    class->lc_name, lock->lo_name);
2223	lock->lo_witness->w_file = file;
2224	lock->lo_witness->w_line = line;
2225	instance->li_file = file;
2226	instance->li_line = line;
2227}
2228
2229void
2230witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2231{
2232#ifdef INVARIANT_SUPPORT
2233	struct lock_instance *instance;
2234	struct lock_class *class;
2235
2236	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2237		return;
2238	class = LOCK_CLASS(lock);
2239	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2240		instance = find_instance(curthread->td_sleeplocks, lock);
2241	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2242		instance = find_instance(PCPU_GET(spinlocks), lock);
2243	else {
2244		panic("Lock (%s) %s is not sleep or spin!",
2245		    class->lc_name, lock->lo_name);
2246	}
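	/*
	 * Each valid assertion combines at most one lock state (LA_UNLOCKED,
	 * LA_LOCKED, LA_SLOCKED or LA_XLOCKED) with an optional LA_RECURSED
	 * or LA_NOTRECURSED qualifier; any other combination is rejected.
	 */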
2247	switch (flags) {
2248	case LA_UNLOCKED:
2249		if (instance != NULL)
2250			panic("Lock (%s) %s locked @ %s:%d.",
2251			    class->lc_name, lock->lo_name,
2252			    fixup_filename(file), line);
2253		break;
2254	case LA_LOCKED:
2255	case LA_LOCKED | LA_RECURSED:
2256	case LA_LOCKED | LA_NOTRECURSED:
2257	case LA_SLOCKED:
2258	case LA_SLOCKED | LA_RECURSED:
2259	case LA_SLOCKED | LA_NOTRECURSED:
2260	case LA_XLOCKED:
2261	case LA_XLOCKED | LA_RECURSED:
2262	case LA_XLOCKED | LA_NOTRECURSED:
2263		if (instance == NULL) {
2264			panic("Lock (%s) %s not locked @ %s:%d.",
2265			    class->lc_name, lock->lo_name,
2266			    fixup_filename(file), line);
2267			break;
2268		}
2269		if ((flags & LA_XLOCKED) != 0 &&
2270		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2271			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2272			    class->lc_name, lock->lo_name,
2273			    fixup_filename(file), line);
2274		if ((flags & LA_SLOCKED) != 0 &&
2275		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2276			panic("Lock (%s) %s exclusively locked @ %s:%d.",
2277			    class->lc_name, lock->lo_name,
2278			    fixup_filename(file), line);
2279		if ((flags & LA_RECURSED) != 0 &&
2280		    (instance->li_flags & LI_RECURSEMASK) == 0)
2281			panic("Lock (%s) %s not recursed @ %s:%d.",
2282			    class->lc_name, lock->lo_name,
2283			    fixup_filename(file), line);
2284		if ((flags & LA_NOTRECURSED) != 0 &&
2285		    (instance->li_flags & LI_RECURSEMASK) != 0)
2286			panic("Lock (%s) %s recursed @ %s:%d.",
2287			    class->lc_name, lock->lo_name,
2288			    fixup_filename(file), line);
2289		break;
2290	default:
2291		panic("Invalid lock assertion at %s:%d.",
2292		    fixup_filename(file), line);
2293
2294	}
2295#endif	/* INVARIANT_SUPPORT */
2296}
2297
2298static void
2299witness_setflag(struct lock_object *lock, int flag, int set)
2300{
2301	struct lock_list_entry *lock_list;
2302	struct lock_instance *instance;
2303	struct lock_class *class;
2304
2305	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2306		return;
2307	class = LOCK_CLASS(lock);
2308	if (class->lc_flags & LC_SLEEPLOCK)
2309		lock_list = curthread->td_sleeplocks;
2310	else {
2311		if (witness_skipspin)
2312			return;
2313		lock_list = PCPU_GET(spinlocks);
2314	}
2315	instance = find_instance(lock_list, lock);
2316	if (instance == NULL)
2317		panic("%s: lock (%s) %s not locked", __func__,
2318		    class->lc_name, lock->lo_name);
2319
2320	if (set)
2321		instance->li_flags |= flag;
2322	else
2323		instance->li_flags &= ~flag;
2324}
2325
2326void
2327witness_norelease(struct lock_object *lock)
2328{
2329
2330	witness_setflag(lock, LI_NORELEASE, 1);
2331}
2332
2333void
2334witness_releaseok(struct lock_object *lock)
2335{
2336
2337	witness_setflag(lock, LI_NORELEASE, 0);
2338}
2339
2340#ifdef DDB
2341static void
2342witness_ddb_list(struct thread *td)
2343{
2344
2345	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2346	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2347
2348	if (witness_watch < 1)
2349		return;
2350
2351	witness_list_locks(&td->td_sleeplocks, db_printf);
2352
2353	/*
2354	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2355	 * if td is currently executing on some other CPU and holds spin locks,
2356	 * as we won't display those locks.  If we had an MI way of getting
2357	 * the per-cpu data for a given cpu then we could use
2358	 * td->td_oncpu to get the list of spinlocks for this thread
2359	 * and "fix" this.
2360	 *
2361	 * That still wouldn't really fix this unless we locked the scheduler
2362	 * lock or stopped the other CPU to make sure it wasn't changing the
2363	 * list out from under us.  It is probably best to just not try to
2364	 * handle threads on other CPUs for now.
2365	 */
2366	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2367		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2368}
2369
2370DB_SHOW_COMMAND(locks, db_witness_list)
2371{
2372	struct thread *td;
2373
2374	if (have_addr)
2375		td = db_lookup_thread(addr, TRUE);
2376	else
2377		td = kdb_thread;
2378	witness_ddb_list(td);
2379}
2380
2381DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2382{
2383	struct thread *td;
2384	struct proc *p;
2385
2386	/*
2387	 * It would be nice to list only threads and processes that actually
2388	 * held sleep locks, but that information is currently not exported
2389	 * by WITNESS.
2390	 */
2391	FOREACH_PROC_IN_SYSTEM(p) {
2392		if (!witness_proc_has_locks(p))
2393			continue;
2394		FOREACH_THREAD_IN_PROC(p, td) {
2395			if (!witness_thread_has_locks(td))
2396				continue;
2397			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2398			    p->p_comm, td, td->td_tid);
2399			witness_ddb_list(td);
2400			if (db_pager_quit)
2401				return;
2402		}
2403	}
2404}
2405DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2406
2407DB_SHOW_COMMAND(witness, db_witness_display)
2408{
2409
2410	witness_ddb_display(db_printf);
2411}
2412#endif
2413
2414static int
2415sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2416{
2417	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2418	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2419	struct sbuf *sb;
2420	u_int w_rmatrix1, w_rmatrix2;
2421	int error, generation, i, j;
2422
2423	tmp_data1 = NULL;
2424	tmp_data2 = NULL;
2425	tmp_w1 = NULL;
2426	tmp_w2 = NULL;
2427	if (witness_watch < 1) {
2428		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2429		return (error);
2430	}
2431	if (witness_cold) {
2432		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2433		return (error);
2434	}
2435	error = 0;
2436	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2437	if (sb == NULL)
2438		return (ENOMEM);
2439
2440	/* Allocate and init temporary storage space. */
2441	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2442	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2443	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2444	    M_WAITOK | M_ZERO);
2445	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2446	    M_WAITOK | M_ZERO);
2447	stack_zero(&tmp_data1->wlod_stack);
2448	stack_zero(&tmp_data2->wlod_stack);
2449
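	/*
	 * The witness graph may change while w_mtx is dropped to format
	 * output.  Sample w_generation here and recheck it after every
	 * reacquisition; if it has changed, clear the sbuf and start over.
	 */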
2450restart:
2451	mtx_lock_spin(&w_mtx);
2452	generation = w_generation;
2453	mtx_unlock_spin(&w_mtx);
2454	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2455	    w_lohash.wloh_count);
2456	for (i = 1; i < w_max_used_index; i++) {
2457		mtx_lock_spin(&w_mtx);
2458		if (generation != w_generation) {
2459			mtx_unlock_spin(&w_mtx);
2460
2461			/* The graph has changed, try again. */
2462			req->oldidx = 0;
2463			sbuf_clear(sb);
2464			goto restart;
2465		}
2466
2467		w1 = &w_data[i];
2468		if (w1->w_reversed == 0) {
2469			mtx_unlock_spin(&w_mtx);
2470			continue;
2471		}
2472
2473		/* Copy w1 locally so we can release the spin lock. */
2474		*tmp_w1 = *w1;
2475		mtx_unlock_spin(&w_mtx);
2476
2477		if (tmp_w1->w_reversed == 0)
2478			continue;
2479		for (j = 1; j < w_max_used_index; j++) {
2480			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2481				continue;
2482
2483			mtx_lock_spin(&w_mtx);
2484			if (generation != w_generation) {
2485				mtx_unlock_spin(&w_mtx);
2486
2487				/* The graph has changed, try again. */
2488				req->oldidx = 0;
2489				sbuf_clear(sb);
2490				goto restart;
2491			}
2492
2493			w2 = &w_data[j];
2494			data1 = witness_lock_order_get(w1, w2);
2495			data2 = witness_lock_order_get(w2, w1);
2496
2497			/*
2498			 * Copy information locally so we can release the
2499			 * spin lock.
2500			 */
2501			*tmp_w2 = *w2;
2502			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2503			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2504
2505			if (data1) {
2506				stack_zero(&tmp_data1->wlod_stack);
2507				stack_copy(&data1->wlod_stack,
2508				    &tmp_data1->wlod_stack);
2509			}
2510			if (data2 && data2 != data1) {
2511				stack_zero(&tmp_data2->wlod_stack);
2512				stack_copy(&data2->wlod_stack,
2513				    &tmp_data2->wlod_stack);
2514			}
2515			mtx_unlock_spin(&w_mtx);
2516
2517			sbuf_printf(sb,
2518	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2519			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2520			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2521#if 0
2522			sbuf_printf(sb,
2523			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2524			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2525			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2526#endif
2527			if (data1) {
2528				sbuf_printf(sb,
2529			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2530				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2531				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2532				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2533				sbuf_printf(sb, "\n");
2534			}
2535			if (data2 && data2 != data1) {
2536				sbuf_printf(sb,
2537			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2538				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2539				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2540				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2541				sbuf_printf(sb, "\n");
2542			}
2543		}
2544	}
2545	mtx_lock_spin(&w_mtx);
2546	if (generation != w_generation) {
2547		mtx_unlock_spin(&w_mtx);
2548
2549		/*
2550		 * The graph changed while we were printing stack data,
2551		 * try again.
2552		 */
2553		req->oldidx = 0;
2554		sbuf_clear(sb);
2555		goto restart;
2556	}
2557	mtx_unlock_spin(&w_mtx);
2558
2559	/* Free temporary storage space. */
2560	free(tmp_data1, M_TEMP);
2561	free(tmp_data2, M_TEMP);
2562	free(tmp_w1, M_TEMP);
2563	free(tmp_w2, M_TEMP);
2564
2565	sbuf_finish(sb);
2566	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2567	sbuf_delete(sb);
2568
2569	return (error);
2570}
2571
2572static int
2573sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2574{
2575	struct witness *w;
2576	struct sbuf *sb;
2577	int error;
2578
2579	if (witness_watch < 1) {
2580		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2581		return (error);
2582	}
2583	if (witness_cold) {
2584		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2585		return (error);
2586	}
2587	error = 0;
2588
2589	error = sysctl_wire_old_buffer(req, 0);
2590	if (error != 0)
2591		return (error);
2592	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2593	if (sb == NULL)
2594		return (ENOMEM);
2595	sbuf_printf(sb, "\n");
2596
2597	mtx_lock_spin(&w_mtx);
2598	STAILQ_FOREACH(w, &w_all, w_list)
2599		w->w_displayed = 0;
2600	STAILQ_FOREACH(w, &w_all, w_list)
2601		witness_add_fullgraph(sb, w);
2602	mtx_unlock_spin(&w_mtx);
2603
2604	/*
2605	 * Close the sbuf and return to userland.
2606	 */
2607	error = sbuf_finish(sb);
2608	sbuf_delete(sb);
2609
2610	return (error);
2611}
2612
2613static int
2614sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2615{
2616	int error, value;
2617
2618	value = witness_watch;
2619	error = sysctl_handle_int(oidp, &value, 0, req);
2620	if (error != 0 || req->newptr == NULL)
2621		return (error);
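	/*
	 * The valid range is -1..1, and witness cannot be re-enabled once
	 * it has been permanently disabled by writing -1.
	 */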
2622	if (value > 1 || value < -1 ||
2623	    (witness_watch == -1 && value != witness_watch))
2624		return (EINVAL);
2625	witness_watch = value;
2626	return (0);
2627}
2628
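/*
 * Recursively emit each parent -> child edge reachable from 'w' as a pair
 * of quoted names, skipping witnesses that have already been displayed or
 * that have no recorded acquisition point.
 */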
2629static void
2630witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2631{
2632	int i;
2633
2634	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2635		return;
2636	w->w_displayed = 1;
2637
2638	WITNESS_INDEX_ASSERT(w->w_index);
2639	for (i = 1; i <= w_max_used_index; i++) {
2640		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2641			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2642			    w_data[i].w_name);
2643			witness_add_fullgraph(sb, &w_data[i]);
2644		}
2645	}
2646}
2647
2648/*
2649 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2650 * interprets the key as a string and reads until the null terminator.
2651 * Otherwise, reads the first size bytes. Returns an unsigned 32-bit hash
2652 * value computed from the key.
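 * This is the classic djb2 hash: hash = hash * 33 + key[i], seeded with 5381.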
2653 */
2654static uint32_t
2655witness_hash_djb2(const uint8_t *key, uint32_t size)
2656{
2657	unsigned int hash = 5381;
2658	int i;
2659
2660	/* hash = hash * 33 + key[i] */
2661	if (size)
2662		for (i = 0; i < size; i++)
2663			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2664	else
2665		for (i = 0; key[i] != 0; i++)
2666			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2667
2668	return (hash);
2669}
2670
2672/*
2673 * Initializes the two witness hash tables. Called exactly once from
2674 * witness_initialize().
2675 */
2676static void
2677witness_init_hash_tables(void)
2678{
2679	int i;
2680
2681	MPASS(witness_cold);
2682
2683	/* Initialize the hash tables. */
2684	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2685		w_hash.wh_array[i] = NULL;
2686
2687	w_hash.wh_size = WITNESS_HASH_SIZE;
2688	w_hash.wh_count = 0;
2689
2690	/* Initialize the lock order data hash. */
2691	w_lofree = NULL;
2692	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2693		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2694		w_lodata[i].wlod_next = w_lofree;
2695		w_lofree = &w_lodata[i];
2696	}
2697	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2698	w_lohash.wloh_count = 0;
2699	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2700		w_lohash.wloh_array[i] = NULL;
2701}
2702
2703static struct witness *
2704witness_hash_get(const char *key)
2705{
2706	struct witness *w;
2707	uint32_t hash;
2708
2709	MPASS(key != NULL);
2710	if (witness_cold == 0)
2711		mtx_assert(&w_mtx, MA_OWNED);
2712	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2713	w = w_hash.wh_array[hash];
2714	while (w != NULL) {
2715		if (strcmp(w->w_name, key) == 0)
2716			goto out;
2717		w = w->w_hash_next;
2718	}
2719
2720out:
2721	return (w);
2722}
2723
2724static void
2725witness_hash_put(struct witness *w)
2726{
2727	uint32_t hash;
2728
2729	MPASS(w != NULL);
2730	MPASS(w->w_name != NULL);
2731	if (witness_cold == 0)
2732		mtx_assert(&w_mtx, MA_OWNED);
2733	KASSERT(witness_hash_get(w->w_name) == NULL,
2734	    ("%s: trying to add a hash entry that already exists!", __func__));
2735	KASSERT(w->w_hash_next == NULL,
2736	    ("%s: w->w_hash_next != NULL", __func__));
2737
2738	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2739	w->w_hash_next = w_hash.wh_array[hash];
2740	w_hash.wh_array[hash] = w;
2741	w_hash.wh_count++;
2742}
2743
2745static struct witness_lock_order_data *
2746witness_lock_order_get(struct witness *parent, struct witness *child)
2747{
2748	struct witness_lock_order_data *data = NULL;
2749	struct witness_lock_order_key key;
2750	unsigned int hash;
2751
2752	MPASS(parent != NULL && child != NULL);
2753	key.from = parent->w_index;
2754	key.to = child->w_index;
2755	WITNESS_INDEX_ASSERT(key.from);
2756	WITNESS_INDEX_ASSERT(key.to);
2757	if ((w_rmatrix[parent->w_index][child->w_index]
2758	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2759		goto out;
2760
2761	hash = witness_hash_djb2((const char*)&key,
2762	    sizeof(key)) % w_lohash.wloh_size;
2763	data = w_lohash.wloh_array[hash];
2764	while (data != NULL) {
2765		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2766			break;
2767		data = data->wlod_next;
2768	}
2769
2770out:
2771	return (data);
2772}
2773
2774/*
2775 * Verify that parent and child have a known relationship, are not the same,
2776 * and child is actually a child of parent.  This is done without w_mtx
2777 * to avoid contention in the common case.
2778 */
2779static int
2780witness_lock_order_check(struct witness *parent, struct witness *child)
2781{
2782
2783	if (parent != child &&
2784	    w_rmatrix[parent->w_index][child->w_index]
2785	    & WITNESS_LOCK_ORDER_KNOWN &&
2786	    isitmychild(parent, child))
2787		return (1);
2788
2789	return (0);
2790}
2791
2792static int
2793witness_lock_order_add(struct witness *parent, struct witness *child)
2794{
2795	struct witness_lock_order_data *data = NULL;
2796	struct witness_lock_order_key key;
2797	unsigned int hash;
2798
2799	MPASS(parent != NULL && child != NULL);
2800	key.from = parent->w_index;
2801	key.to = child->w_index;
2802	WITNESS_INDEX_ASSERT(key.from);
2803	WITNESS_INDEX_ASSERT(key.to);
2804	if (w_rmatrix[parent->w_index][child->w_index]
2805	    & WITNESS_LOCK_ORDER_KNOWN)
2806		return (1);
2807
2808	hash = witness_hash_djb2((const char*)&key,
2809	    sizeof(key)) % w_lohash.wloh_size;
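	/*
	 * Mark the order as known even if no free data entry is available
	 * below; in that case no stack trace is recorded and the pair will
	 * not be retried later.
	 */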
2810	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2811	data = w_lofree;
2812	if (data == NULL)
2813		return (0);
2814	w_lofree = data->wlod_next;
2815	data->wlod_next = w_lohash.wloh_array[hash];
2816	data->wlod_key = key;
2817	w_lohash.wloh_array[hash] = data;
2818	w_lohash.wloh_count++;
2819	stack_zero(&data->wlod_stack);
2820	stack_save(&data->wlod_stack);
2821	return (1);
2822}
2823
2824/* Call this whenever the structure of the witness graph changes. */
2825static void
2826witness_increment_graph_generation(void)
2827{
2828
2829	if (witness_cold == 0)
2830		mtx_assert(&w_mtx, MA_OWNED);
2831	w_generation++;
2832}
2833
2834#ifdef KDB
2835static void
2836_witness_debugger(int cond, const char *msg)
2837{
2838
2839	if (witness_trace && cond)
2840		kdb_backtrace();
2841	if (witness_kdb && cond)
2842		kdb_enter(KDB_WHY_WITNESS, msg);
2843}
2844#endif
2845