subr_witness.c revision 291217
1/*-
2 * Copyright (c) 2008 Isilon Systems, Inc.
3 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4 * Copyright (c) 1998 Berkeley Software Design, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Berkeley Software Design Inc's name may not be used to endorse or
16 *    promote products derived from this software without specific prior
17 *    written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33 */
34
35/*
36 * Implementation of the `witness' lock verifier.  Originally implemented for
37 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
38 * classes in FreeBSD.
39 */
40
41/*
42 *	Main Entry: witness
43 *	Pronunciation: 'wit-n&s
44 *	Function: noun
45 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
46 *	    testimony, witness, from 2wit
47 *	Date: before 12th century
48 *	1 : attestation of a fact or event : TESTIMONY
49 *	2 : one that gives evidence; specifically : one who testifies in
50 *	    a cause or before a judicial tribunal
51 *	3 : one asked to be present at a transaction so as to be able to
52 *	    testify to its having taken place
53 *	4 : one who has personal knowledge of something
54 *	5 a : something serving as evidence or proof : SIGN
55 *	  b : public affirmation by word or example of usually
56 *	      religious faith or conviction <the heroic witness to divine
57 *	      life -- Pilot>
58 *	6 capitalized : a member of the Jehovah's Witnesses
59 */
60
61/*
62 * Special rules concerning Giant and lock orders:
63 *
64 * 1) Giant must be acquired before any other mutexes.  Stated another way,
65 *    no other mutex may be held when Giant is acquired.
66 *
67 * 2) Giant must be released when blocking on a sleepable lock.
68 *
69 * This rule is less obvious, but is a result of Giant providing the same
70 * semantics as spl().  Basically, when a thread sleeps, it must release
71 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
72 * 2).
73 *
74 * 3) Giant may be acquired before or after sleepable locks.
75 *
76 * This rule is also not quite as obvious.  Giant may be acquired after
77 * a sleepable lock because it is a non-sleepable lock and non-sleepable
78 * locks may always be acquired while holding a sleepable lock.  The second
79 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
80 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
81 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
82 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
83 * execute.  Thus, acquiring Giant both before and after a sleepable lock
84 * will not result in a lock order reversal.
85 */
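
/*
 * Illustrative sketch (not taken from the original BSD/OS or FreeBSD
 * sources): under rule 3) both of the following nestings are accepted,
 * assuming "example_sx" is a sleepable sx(9) lock:
 *
 *	sx_xlock(&example_sx);			mtx_lock(&Giant);
 *	mtx_lock(&Giant);			sx_xlock(&example_sx);
 *	...					...
 *	mtx_unlock(&Giant);			sx_xunlock(&example_sx);
 *	sx_xunlock(&example_sx);		mtx_unlock(&Giant);
 *
 * Rule 2) is what keeps the right-hand column deadlock-free: a thread
 * that blocks on the sx lock while holding Giant drops Giant first.
 */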
86
87#include <sys/cdefs.h>
88__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 291217 2015-11-23 18:45:35Z markj $");
89
90#include "opt_ddb.h"
91#include "opt_hwpmc_hooks.h"
92#include "opt_stack.h"
93#include "opt_witness.h"
94
95#include <sys/param.h>
96#include <sys/bus.h>
97#include <sys/kdb.h>
98#include <sys/kernel.h>
99#include <sys/ktr.h>
100#include <sys/lock.h>
101#include <sys/malloc.h>
102#include <sys/mutex.h>
103#include <sys/priv.h>
104#include <sys/proc.h>
105#include <sys/sbuf.h>
106#include <sys/sched.h>
107#include <sys/stack.h>
108#include <sys/sysctl.h>
109#include <sys/syslog.h>
110#include <sys/systm.h>
111
112#ifdef DDB
113#include <ddb/ddb.h>
114#endif
115
116#include <machine/stdarg.h>
117
118#if !defined(DDB) && !defined(STACK)
119#error "DDB or STACK options are required for WITNESS"
120#endif
121
122/* Note that these traces do not work with KTR_ALQ. */
123#if 0
124#define	KTR_WITNESS	KTR_SUBSYS
125#else
126#define	KTR_WITNESS	0
127#endif
128
129#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
130#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
131#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
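
/*
 * For example, a lock instance that is held exclusively and has been
 * recursively acquired twice on top of the initial acquisition has
 *
 *	li_flags == (LI_EXCLUSIVE | 2)
 *
 * so "li_flags & LI_RECURSEMASK" yields the recursion depth while the
 * flag bits live above the low 16 bits.
 */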
132
133/* Define this to check for blessed mutexes */
134#undef BLESSING
135
136#ifndef WITNESS_COUNT
137#define	WITNESS_COUNT 		1536
138#endif
139#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
140#define	WITNESS_PENDLIST	(1024 + MAXCPU)
141
142/* Allocate 256 KB of stack data space */
143#define	WITNESS_LO_DATA_COUNT	2048
144
145/* Prime, gives load factor of ~2 at full load */
146#define	WITNESS_LO_HASH_SIZE	1021
147
148/*
149 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
150 * will hold LOCK_NCHILDREN locks.  We handle allocation failure gracefully,
151 * and we should probably be safe for the most part, but it's still a SWAG.
152 */
153#define	LOCK_NCHILDREN	5
154#define	LOCK_CHILDCOUNT	2048
155
156#define	MAX_W_NAME	64
157
158#define	FULLGRAPH_SBUF_SIZE	512
159
160/*
161 * These flags go in the witness relationship matrix and describe the
162 * relationship between any two struct witness objects.
163 */
164#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
165#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
166#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
167#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
168#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
169#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
170#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
171#define	WITNESS_RELATED_MASK						\
172	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
173#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
174					  * observed. */
175#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
176#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
177#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
178
179/* Descendant to ancestor flags */
180#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
181
182/* Ancestor to descendant flags */
183#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
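
/*
 * For example, the two conversion macros above are simple bit shifts:
 *
 *	WITNESS_ATOD(WITNESS_PARENT)     == WITNESS_CHILD
 *	WITNESS_ATOD(WITNESS_ANCESTOR)   == WITNESS_DESCENDANT
 *	WITNESS_DTOA(WITNESS_CHILD)      == WITNESS_PARENT
 *	WITNESS_DTOA(WITNESS_DESCENDANT) == WITNESS_ANCESTOR
 *
 * i.e. the same relationship seen from the other end of the edge.
 */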
184
185#define	WITNESS_INDEX_ASSERT(i)						\
186	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)
187
188static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
189
190/*
191 * Lock instances.  A lock instance is the data associated with a lock while
192 * it is held by witness.  For example, a lock instance will hold the
193 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
194 * are held in a per-cpu list while sleep locks are held in a per-thread list.
195 */
196struct lock_instance {
197	struct lock_object	*li_lock;
198	const char		*li_file;
199	int			li_line;
200	u_int			li_flags;
201};
202
203/*
204 * A simple list type used to build the list of locks held by a thread
205 * or CPU.  We can't simply embed the list in struct lock_object since a
206 * lock may be held by more than one thread if it is a shared lock.  Locks
207 * are added to the head of the list, so we fill up each list entry from
208 * "the back" logically.  To ease some of the arithmetic, we actually fill
209 * in each list entry the normal way (children[0] then children[1], etc.) but
210 * when we traverse the list we read children[count-1] as the first entry
211 * down to children[0] as the final entry.
212 */
213struct lock_list_entry {
214	struct lock_list_entry	*ll_next;
215	struct lock_instance	ll_children[LOCK_NCHILDREN];
216	u_int			ll_count;
217};
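
/*
 * A minimal traversal sketch matching the convention described above
 * ("head" and "examine" are placeholders, not names used in this file):
 * the most recently acquired lock is ll_children[ll_count - 1] of the
 * head entry, so walking from newest to oldest looks like
 *
 *	struct lock_list_entry *lle;
 *	int i;
 *
 *	for (lle = head; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			examine(&lle->ll_children[i]);
 */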
218
219/*
220 * The main witness structure. One of these per named lock type in the system
221 * (for example, "vnode interlock").
222 */
223struct witness {
224	char  			w_name[MAX_W_NAME];
225	uint32_t 		w_index;  /* Index in the relationship matrix */
226	struct lock_class	*w_class;
227	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
228	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
229	struct witness		*w_hash_next; /* Linked list in hash buckets. */
230	const char		*w_file; /* File where last acquired */
231	uint32_t 		w_line; /* Line where last acquired */
232	uint32_t 		w_refcount;
233	uint16_t 		w_num_ancestors; /* direct/indirect
234						  * ancestor count */
235	uint16_t 		w_num_descendants; /* direct/indirect
236						    * descendant count */
237	int16_t 		w_ddb_level;
238	unsigned		w_displayed:1;
239	unsigned		w_reversed:1;
240};
241
242STAILQ_HEAD(witness_list, witness);
243
244/*
245 * The witness hash table. Keys are witness names (const char *), elements are
246 * witness objects (struct witness *).
247 */
248struct witness_hash {
249	struct witness	*wh_array[WITNESS_HASH_SIZE];
250	uint32_t	wh_size;
251	uint32_t	wh_count;
252};
253
254/*
255 * Key type for the lock order data hash table.
256 */
257struct witness_lock_order_key {
258	uint16_t	from;
259	uint16_t	to;
260};
261
262struct witness_lock_order_data {
263	struct stack			wlod_stack;
264	struct witness_lock_order_key	wlod_key;
265	struct witness_lock_order_data	*wlod_next;
266};
267
268/*
269 * The witness lock order data hash table. Keys are witness index tuples
270 * (struct witness_lock_order_key), elements are lock order data objects
271 * (struct witness_lock_order_data).
272 */
273struct witness_lock_order_hash {
274	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
275	u_int	wloh_size;
276	u_int	wloh_count;
277};
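
/*
 * Sketch of how a lookup key is assumed to be formed from the structures
 * above (not quoted from the lookup code): a pair of witness indices
 * identifies the directed "parent acquired before child" edge.
 *
 *	struct witness_lock_order_key key;
 *
 *	key.from = parent->w_index;
 *	key.to = child->w_index;
 *
 * Two keys match when witness_lock_order_key_equal() below says so.
 */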
278
279#ifdef BLESSING
280struct witness_blessed {
281	const char	*b_lock1;
282	const char	*b_lock2;
283};
284#endif
285
286struct witness_pendhelp {
287	const char		*wh_type;
288	struct lock_object	*wh_lock;
289};
290
291struct witness_order_list_entry {
292	const char		*w_name;
293	struct lock_class	*w_class;
294};
295
296/*
297 * Returns 0 if one of the locks is a spin lock and the other is not.
298 * Returns 1 otherwise.
299 */
300static __inline int
301witness_lock_type_equal(struct witness *w1, struct witness *w2)
302{
303
304	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
305		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
306}
307
308static __inline int
309witness_lock_order_key_equal(const struct witness_lock_order_key *a,
310    const struct witness_lock_order_key *b)
311{
312
313	return (a->from == b->from && a->to == b->to);
314}
315
316static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
317		    const char *fname);
318static void	adopt(struct witness *parent, struct witness *child);
319#ifdef BLESSING
320static int	blessed(struct witness *, struct witness *);
321#endif
322static void	depart(struct witness *w);
323static struct witness	*enroll(const char *description,
324			    struct lock_class *lock_class);
325static struct lock_instance	*find_instance(struct lock_list_entry *list,
326				    const struct lock_object *lock);
327static int	isitmychild(struct witness *parent, struct witness *child);
328static int	isitmydescendant(struct witness *parent, struct witness *child);
329static void	itismychild(struct witness *parent, struct witness *child);
330static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
331static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
332static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
333static int	sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS);
334static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
335#ifdef DDB
336static void	witness_ddb_compute_levels(void);
337static void	witness_ddb_display(int(*)(const char *fmt, ...));
338static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
339		    struct witness *, int indent);
340static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
341		    struct witness_list *list);
342static void	witness_ddb_level_descendants(struct witness *parent, int l);
343static void	witness_ddb_list(struct thread *td);
344#endif
345static void	witness_debugger(int cond, const char *msg);
346static void	witness_free(struct witness *m);
347static struct witness	*witness_get(void);
348static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
349static struct witness	*witness_hash_get(const char *key);
350static void	witness_hash_put(struct witness *w);
351static void	witness_init_hash_tables(void);
352static void	witness_increment_graph_generation(void);
353static void	witness_lock_list_free(struct lock_list_entry *lle);
354static struct lock_list_entry	*witness_lock_list_get(void);
355static int	witness_lock_order_add(struct witness *parent,
356		    struct witness *child);
357static int	witness_lock_order_check(struct witness *parent,
358		    struct witness *child);
359static struct witness_lock_order_data	*witness_lock_order_get(
360					    struct witness *parent,
361					    struct witness *child);
362static void	witness_list_lock(struct lock_instance *instance,
363		    int (*prnt)(const char *fmt, ...));
364static int	witness_output(const char *fmt, ...) __printflike(1, 2);
365static int	witness_voutput(const char *fmt, va_list ap) __printflike(1, 0);
366static void	witness_setflag(struct lock_object *lock, int flag, int set);
367
368static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
369    "Witness Locking");
370
371/*
372 * If set to 0, lock order checking is disabled.  If set to -1,
373 * witness is completely disabled.  Otherwise witness performs full
374 * lock order checking for all locks.  At runtime, lock order checking
375 * may be toggled.  However, witness cannot be reenabled once it is
376 * completely disabled.
377 */
378static int witness_watch = 1;
379SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RWTUN | CTLTYPE_INT, NULL, 0,
380    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
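
/*
 * Usage note: the handler is exposed as debug.witness.watch and flagged
 * CTLFLAG_RWTUN, so lock order checking can be turned off at runtime with
 *
 *	sysctl debug.witness.watch=0
 *
 * or witness can be disabled entirely by setting debug.witness.watch=-1
 * as a loader tunable; per the comment above, -1 is a one-way switch.
 */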
381
382#ifdef KDB
383/*
384 * When KDB is enabled and witness_kdb is 1, the system will drop into
385 * kdebug() when:
386 *	- a lock hierarchy violation occurs
387 *	- locks are held when going to sleep.
388 */
389#ifdef WITNESS_KDB
390int	witness_kdb = 1;
391#else
392int	witness_kdb = 0;
393#endif
394SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");
395#endif /* KDB */
396
397#if defined(DDB) || defined(KDB)
398/*
399 * When DDB or KDB is enabled and witness_trace is 1, the system will print
400 * a stack trace when:
401 *	- a lock hierarchy violation occurs
402 *	- locks are held when going to sleep.
403 */
404int	witness_trace = 1;
405SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
406#endif /* DDB || KDB */
407
408#ifdef WITNESS_SKIPSPIN
409int	witness_skipspin = 1;
410#else
411int	witness_skipspin = 0;
412#endif
413SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");
414
415int badstack_sbuf_size;
416
417int witness_count = WITNESS_COUNT;
418SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
419    &witness_count, 0, "");
420
421/*
422 * Output channel for witness messages.  By default we print to the console.
423 */
424enum witness_channel {
425	WITNESS_CONSOLE,
426	WITNESS_LOG,
427	WITNESS_NONE,
428};
429
430static enum witness_channel witness_channel = WITNESS_CONSOLE;
431SYSCTL_PROC(_debug_witness, OID_AUTO, output_channel, CTLTYPE_STRING |
432    CTLFLAG_RWTUN, NULL, 0, sysctl_debug_witness_channel, "A",
433    "Output channel for warnings");
434
435/*
436 * Call this to print out the relations between locks.
437 */
438SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
439    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
440
441/*
442 * Call this to print out the witness faulty stacks.
443 */
444SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
445    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
446
447static struct mtx w_mtx;
448
449/* w_list */
450static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
451static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
452
453/* w_typelist */
454static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
455static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
456
457/* lock list */
458static struct lock_list_entry *w_lock_list_free = NULL;
459static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
460static u_int pending_cnt;
461
462static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
463SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
464SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
465SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
466    "");
467
468static struct witness *w_data;
469static uint8_t **w_rmatrix;
470static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
471static struct witness_hash w_hash;	/* The witness hash table. */
472
473/* The lock order data hash */
474static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
475static struct witness_lock_order_data *w_lofree = NULL;
476static struct witness_lock_order_hash w_lohash;
477static int w_max_used_index = 0;
478static unsigned int w_generation = 0;
479static const char w_notrunning[] = "Witness not running\n";
480static const char w_stillcold[] = "Witness is still cold\n";
481
482
483static struct witness_order_list_entry order_lists[] = {
484	/*
485	 * sx locks
486	 */
487	{ "proctree", &lock_class_sx },
488	{ "allproc", &lock_class_sx },
489	{ "allprison", &lock_class_sx },
490	{ NULL, NULL },
491	/*
492	 * Various mutexes
493	 */
494	{ "Giant", &lock_class_mtx_sleep },
495	{ "pipe mutex", &lock_class_mtx_sleep },
496	{ "sigio lock", &lock_class_mtx_sleep },
497	{ "process group", &lock_class_mtx_sleep },
498	{ "process lock", &lock_class_mtx_sleep },
499	{ "session", &lock_class_mtx_sleep },
500	{ "uidinfo hash", &lock_class_rw },
501#ifdef	HWPMC_HOOKS
502	{ "pmc-sleep", &lock_class_mtx_sleep },
503#endif
504	{ "time lock", &lock_class_mtx_sleep },
505	{ NULL, NULL },
506	/*
507	 * umtx
508	 */
509	{ "umtx lock", &lock_class_mtx_sleep },
510	{ NULL, NULL },
511	/*
512	 * Sockets
513	 */
514	{ "accept", &lock_class_mtx_sleep },
515	{ "so_snd", &lock_class_mtx_sleep },
516	{ "so_rcv", &lock_class_mtx_sleep },
517	{ "sellck", &lock_class_mtx_sleep },
518	{ NULL, NULL },
519	/*
520	 * Routing
521	 */
522	{ "so_rcv", &lock_class_mtx_sleep },
523	{ "radix node head", &lock_class_rw },
524	{ "rtentry", &lock_class_mtx_sleep },
525	{ "ifaddr", &lock_class_mtx_sleep },
526	{ NULL, NULL },
527	/*
528	 * IPv4 multicast:
529	 * protocol locks before interface locks, after UDP locks.
530	 */
531	{ "udpinp", &lock_class_rw },
532	{ "in_multi_mtx", &lock_class_mtx_sleep },
533	{ "igmp_mtx", &lock_class_mtx_sleep },
534	{ "if_addr_lock", &lock_class_rw },
535	{ NULL, NULL },
536	/*
537	 * IPv6 multicast:
538	 * protocol locks before interface locks, after UDP locks.
539	 */
540	{ "udpinp", &lock_class_rw },
541	{ "in6_multi_mtx", &lock_class_mtx_sleep },
542	{ "mld_mtx", &lock_class_mtx_sleep },
543	{ "if_addr_lock", &lock_class_rw },
544	{ NULL, NULL },
545	/*
546	 * UNIX Domain Sockets
547	 */
548	{ "unp_link_rwlock", &lock_class_rw },
549	{ "unp_list_lock", &lock_class_mtx_sleep },
550	{ "unp", &lock_class_mtx_sleep },
551	{ "so_snd", &lock_class_mtx_sleep },
552	{ NULL, NULL },
553	/*
554	 * UDP/IP
555	 */
556	{ "udp", &lock_class_rw },
557	{ "udpinp", &lock_class_rw },
558	{ "so_snd", &lock_class_mtx_sleep },
559	{ NULL, NULL },
560	/*
561	 * TCP/IP
562	 */
563	{ "tcp", &lock_class_rw },
564	{ "tcpinp", &lock_class_rw },
565	{ "so_snd", &lock_class_mtx_sleep },
566	{ NULL, NULL },
567	/*
568	 * BPF
569	 */
570	{ "bpf global lock", &lock_class_mtx_sleep },
571	{ "bpf interface lock", &lock_class_rw },
572	{ "bpf cdev lock", &lock_class_mtx_sleep },
573	{ NULL, NULL },
574	/*
575	 * NFS server
576	 */
577	{ "nfsd_mtx", &lock_class_mtx_sleep },
578	{ "so_snd", &lock_class_mtx_sleep },
579	{ NULL, NULL },
580
581	/*
582	 * IEEE 802.11
583	 */
584	{ "802.11 com lock", &lock_class_mtx_sleep},
585	{ NULL, NULL },
586	/*
587	 * Network drivers
588	 */
589	{ "network driver", &lock_class_mtx_sleep},
590	{ NULL, NULL },
591
592	/*
593	 * Netgraph
594	 */
595	{ "ng_node", &lock_class_mtx_sleep },
596	{ "ng_worklist", &lock_class_mtx_sleep },
597	{ NULL, NULL },
598	/*
599	 * CDEV
600	 */
601	{ "vm map (system)", &lock_class_mtx_sleep },
602	{ "vm page queue", &lock_class_mtx_sleep },
603	{ "vnode interlock", &lock_class_mtx_sleep },
604	{ "cdev", &lock_class_mtx_sleep },
605	{ NULL, NULL },
606	/*
607	 * VM
608	 */
609	{ "vm map (user)", &lock_class_sx },
610	{ "vm object", &lock_class_rw },
611	{ "vm page", &lock_class_mtx_sleep },
612	{ "vm page queue", &lock_class_mtx_sleep },
613	{ "pmap pv global", &lock_class_rw },
614	{ "pmap", &lock_class_mtx_sleep },
615	{ "pmap pv list", &lock_class_rw },
616	{ "vm page free queue", &lock_class_mtx_sleep },
617	{ NULL, NULL },
618	/*
619	 * kqueue/VFS interaction
620	 */
621	{ "kqueue", &lock_class_mtx_sleep },
622	{ "struct mount mtx", &lock_class_mtx_sleep },
623	{ "vnode interlock", &lock_class_mtx_sleep },
624	{ NULL, NULL },
625	/*
626	 * ZFS locking
627	 */
628	{ "dn->dn_mtx", &lock_class_sx },
629	{ "dr->dt.di.dr_mtx", &lock_class_sx },
630	{ "db->db_mtx", &lock_class_sx },
631	{ NULL, NULL },
632	/*
633	 * spin locks
634	 */
635#ifdef SMP
636	{ "ap boot", &lock_class_mtx_spin },
637#endif
638	{ "rm.mutex_mtx", &lock_class_mtx_spin },
639	{ "sio", &lock_class_mtx_spin },
640	{ "scrlock", &lock_class_mtx_spin },
641#ifdef __i386__
642	{ "cy", &lock_class_mtx_spin },
643#endif
644#ifdef __sparc64__
645	{ "pcib_mtx", &lock_class_mtx_spin },
646	{ "rtc_mtx", &lock_class_mtx_spin },
647#endif
648	{ "scc_hwmtx", &lock_class_mtx_spin },
649	{ "uart_hwmtx", &lock_class_mtx_spin },
650	{ "fast_taskqueue", &lock_class_mtx_spin },
651	{ "intr table", &lock_class_mtx_spin },
652#ifdef	HWPMC_HOOKS
653	{ "pmc-per-proc", &lock_class_mtx_spin },
654#endif
655	{ "process slock", &lock_class_mtx_spin },
656	{ "sleepq chain", &lock_class_mtx_spin },
657	{ "rm_spinlock", &lock_class_mtx_spin },
658	{ "turnstile chain", &lock_class_mtx_spin },
659	{ "turnstile lock", &lock_class_mtx_spin },
660	{ "sched lock", &lock_class_mtx_spin },
661	{ "td_contested", &lock_class_mtx_spin },
662	{ "callout", &lock_class_mtx_spin },
663	{ "entropy harvest mutex", &lock_class_mtx_spin },
664	{ "syscons video lock", &lock_class_mtx_spin },
665#ifdef SMP
666	{ "smp rendezvous", &lock_class_mtx_spin },
667#endif
668#ifdef __powerpc__
669	{ "tlb0", &lock_class_mtx_spin },
670#endif
671	/*
672	 * leaf locks
673	 */
674	{ "intrcnt", &lock_class_mtx_spin },
675	{ "icu", &lock_class_mtx_spin },
676#if defined(SMP) && defined(__sparc64__)
677	{ "ipi", &lock_class_mtx_spin },
678#endif
679#ifdef __i386__
680	{ "allpmaps", &lock_class_mtx_spin },
681	{ "descriptor tables", &lock_class_mtx_spin },
682#endif
683	{ "clk", &lock_class_mtx_spin },
684	{ "cpuset", &lock_class_mtx_spin },
685	{ "mprof lock", &lock_class_mtx_spin },
686	{ "zombie lock", &lock_class_mtx_spin },
687	{ "ALD Queue", &lock_class_mtx_spin },
688#if defined(__i386__) || defined(__amd64__)
689	{ "pcicfg", &lock_class_mtx_spin },
690	{ "NDIS thread lock", &lock_class_mtx_spin },
691#endif
692	{ "tw_osl_io_lock", &lock_class_mtx_spin },
693	{ "tw_osl_q_lock", &lock_class_mtx_spin },
694	{ "tw_cl_io_lock", &lock_class_mtx_spin },
695	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
696	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
697#ifdef	HWPMC_HOOKS
698	{ "pmc-leaf", &lock_class_mtx_spin },
699#endif
700	{ "blocked lock", &lock_class_mtx_spin },
701	{ NULL, NULL },
702	{ NULL, NULL }
703};
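
/*
 * Reading the table above: each { NULL, NULL } entry terminates one group,
 * and within a group every named lock must be acquired before the names
 * that follow it.  The first group, for instance, encodes the order
 *
 *	"proctree" -> "allproc" -> "allprison"
 *
 * so acquiring "allproc" while already holding "allprison" would be
 * reported as a lock order reversal.
 */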
704
705#ifdef BLESSING
706/*
707 * Pairs of locks which have been blessed
708 * Don't complain about order problems with blessed locks
709 */
710static struct witness_blessed blessed_list[] = {
711};
712static int blessed_count =
713	sizeof(blessed_list) / sizeof(struct witness_blessed);
714#endif
715
716/*
717 * This global is set to 0 once it becomes safe to use the witness code.
718 */
719static int witness_cold = 1;
720
721/*
722 * This global is set to 1 once the static lock orders have been enrolled
723 * so that a warning can be issued for any spin locks enrolled later.
724 */
725static int witness_spin_warn = 0;
726
727/* Trim useless garbage from filenames. */
728static const char *
729fixup_filename(const char *file)
730{
731
732	if (file == NULL)
733		return (NULL);
734	while (strncmp(file, "../", 3) == 0)
735		file += 3;
736	return (file);
737}
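
/*
 * For example (with a made-up path), fixup_filename("../../../kern/kern_mutex.c")
 * returns a pointer to the "kern/kern_mutex.c" suffix; only leading "../"
 * components are stripped.
 */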
738
739/*
740 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
741 * that early boot is single-threaded, at least until after this routine has
742 * completed.
743 */
744static void
745witness_initialize(void *dummy __unused)
746{
747	struct lock_object *lock;
748	struct witness_order_list_entry *order;
749	struct witness *w, *w1;
750	int i;
751
752	w_data = malloc(sizeof (struct witness) * witness_count, M_WITNESS,
753	    M_WAITOK | M_ZERO);
754
755	w_rmatrix = malloc(sizeof(*w_rmatrix) * (witness_count + 1),
756	    M_WITNESS, M_WAITOK | M_ZERO);
757
758	for (i = 0; i < witness_count + 1; i++) {
759		w_rmatrix[i] = malloc(sizeof(*w_rmatrix[i]) *
760		    (witness_count + 1), M_WITNESS, M_WAITOK | M_ZERO);
761	}
762	badstack_sbuf_size = witness_count * 256;
763
764	/*
765	 * We have to release Giant before initializing its witness
766	 * structure so that WITNESS doesn't get confused.
767	 */
768	mtx_unlock(&Giant);
769	mtx_assert(&Giant, MA_NOTOWNED);
770
771	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
772	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
773	    MTX_NOWITNESS | MTX_NOPROFILE);
774	for (i = witness_count - 1; i >= 0; i--) {
775		w = &w_data[i];
776		memset(w, 0, sizeof(*w));
777		w_data[i].w_index = i;	/* Witness index never changes. */
778		witness_free(w);
779	}
780	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
781	    ("%s: Invalid list of free witness objects", __func__));
782
783	/* Witness with index 0 is left unused, to aid in debugging. */
784	STAILQ_REMOVE_HEAD(&w_free, w_list);
785	w_free_cnt--;
786
787	for (i = 0; i < witness_count; i++) {
788		memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
789		    (witness_count + 1));
790	}
791
792	for (i = 0; i < LOCK_CHILDCOUNT; i++)
793		witness_lock_list_free(&w_locklistdata[i]);
794	witness_init_hash_tables();
795
796	/* First add in all the specified order lists. */
797	for (order = order_lists; order->w_name != NULL; order++) {
798		w = enroll(order->w_name, order->w_class);
799		if (w == NULL)
800			continue;
801		w->w_file = "order list";
802		for (order++; order->w_name != NULL; order++) {
803			w1 = enroll(order->w_name, order->w_class);
804			if (w1 == NULL)
805				continue;
806			w1->w_file = "order list";
807			itismychild(w, w1);
808			w = w1;
809		}
810	}
811	witness_spin_warn = 1;
812
813	/* Iterate through all locks and add them to witness. */
814	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
815		lock = pending_locks[i].wh_lock;
816		KASSERT(lock->lo_flags & LO_WITNESS,
817		    ("%s: lock %s is on pending list but not LO_WITNESS",
818		    __func__, lock->lo_name));
819		lock->lo_witness = enroll(pending_locks[i].wh_type,
820		    LOCK_CLASS(lock));
821	}
822
823	/* Mark the witness code as being ready for use. */
824	witness_cold = 0;
825
826	mtx_lock(&Giant);
827}
828SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
829    NULL);
830
831void
832witness_init(struct lock_object *lock, const char *type)
833{
834	struct lock_class *class;
835
836	/* Various sanity checks. */
837	class = LOCK_CLASS(lock);
838	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
839	    (class->lc_flags & LC_RECURSABLE) == 0)
840		kassert_panic("%s: lock (%s) %s can not be recursable",
841		    __func__, class->lc_name, lock->lo_name);
842	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
843	    (class->lc_flags & LC_SLEEPABLE) == 0)
844		kassert_panic("%s: lock (%s) %s can not be sleepable",
845		    __func__, class->lc_name, lock->lo_name);
846	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
847	    (class->lc_flags & LC_UPGRADABLE) == 0)
848		kassert_panic("%s: lock (%s) %s can not be upgradable",
849		    __func__, class->lc_name, lock->lo_name);
850
851	/*
852	 * If we shouldn't watch this lock, then just clear lo_witness.
853	 * Otherwise, if witness_cold is set, then it is too early to
854	 * enroll this lock, so defer it to witness_initialize() by adding
855	 * it to the pending_locks list.  If it is not too early, then enroll
856	 * the lock now.
857	 */
858	if (witness_watch < 1 || panicstr != NULL ||
859	    (lock->lo_flags & LO_WITNESS) == 0)
860		lock->lo_witness = NULL;
861	else if (witness_cold) {
862		pending_locks[pending_cnt].wh_lock = lock;
863		pending_locks[pending_cnt++].wh_type = type;
864		if (pending_cnt > WITNESS_PENDLIST)
865			panic("%s: pending locks list is too small, "
866			    "increase WITNESS_PENDLIST\n",
867			    __func__);
868	} else
869		lock->lo_witness = enroll(type, class);
870}
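
/*
 * Usage sketch (a typical, assumed caller pattern rather than code from the
 * lock class implementations): a lock whose lo_flags lack LO_WITNESS gets
 * lo_witness == NULL above and is never order-checked, e.g. a mutex created
 * with
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF | MTX_NOWITNESS);
 *
 * Locks initialized before witness_initialize() runs are instead parked on
 * pending_locks[] and enrolled later, as handled above.
 */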
871
872void
873witness_destroy(struct lock_object *lock)
874{
875	struct lock_class *class;
876	struct witness *w;
877
878	class = LOCK_CLASS(lock);
879
880	if (witness_cold)
881		panic("lock (%s) %s destroyed while witness_cold",
882		    class->lc_name, lock->lo_name);
883
884	/* XXX: need to verify that no one holds the lock */
885	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
886		return;
887	w = lock->lo_witness;
888
889	mtx_lock_spin(&w_mtx);
890	MPASS(w->w_refcount > 0);
891	w->w_refcount--;
892
893	if (w->w_refcount == 0)
894		depart(w);
895	mtx_unlock_spin(&w_mtx);
896}
897
898#ifdef DDB
899static void
900witness_ddb_compute_levels(void)
901{
902	struct witness *w;
903
904	/*
905	 * First clear all levels.
906	 */
907	STAILQ_FOREACH(w, &w_all, w_list)
908		w->w_ddb_level = -1;
909
910	/*
911	 * Look for locks with no parents and level all their descendants.
912	 */
913	STAILQ_FOREACH(w, &w_all, w_list) {
914
915		/* If the witness has ancestors (is not a root), skip it. */
916		if (w->w_num_ancestors > 0)
917			continue;
918		witness_ddb_level_descendants(w, 0);
919	}
920}
921
922static void
923witness_ddb_level_descendants(struct witness *w, int l)
924{
925	int i;
926
927	if (w->w_ddb_level >= l)
928		return;
929
930	w->w_ddb_level = l;
931	l++;
932
933	for (i = 1; i <= w_max_used_index; i++) {
934		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
935			witness_ddb_level_descendants(&w_data[i], l);
936	}
937}
938
939static void
940witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
941    struct witness *w, int indent)
942{
943	int i;
944
945 	for (i = 0; i < indent; i++)
946 		prnt(" ");
947	prnt("%s (type: %s, depth: %d, active refs: %d)",
948	     w->w_name, w->w_class->lc_name,
949	     w->w_ddb_level, w->w_refcount);
950 	if (w->w_displayed) {
951 		prnt(" -- (already displayed)\n");
952 		return;
953 	}
954 	w->w_displayed = 1;
955	if (w->w_file != NULL && w->w_line != 0)
956		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
957		    w->w_line);
958	else
959		prnt(" -- never acquired\n");
960	indent++;
961	WITNESS_INDEX_ASSERT(w->w_index);
962	for (i = 1; i <= w_max_used_index; i++) {
963		if (db_pager_quit)
964			return;
965		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
966			witness_ddb_display_descendants(prnt, &w_data[i],
967			    indent);
968	}
969}
970
971static void
972witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
973    struct witness_list *list)
974{
975	struct witness *w;
976
977	STAILQ_FOREACH(w, list, w_typelist) {
978		if (w->w_file == NULL || w->w_ddb_level > 0)
979			continue;
980
981		/* This lock has no ancestors - display its descendants. */
982		witness_ddb_display_descendants(prnt, w, 0);
983		if (db_pager_quit)
984			return;
985	}
986}
987
988static void
989witness_ddb_display(int(*prnt)(const char *fmt, ...))
990{
991	struct witness *w;
992
993	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
994	witness_ddb_compute_levels();
995
996	/* Clear all the displayed flags. */
997	STAILQ_FOREACH(w, &w_all, w_list)
998		w->w_displayed = 0;
999
1000	/*
1001	 * First, handle sleep locks which have been acquired at least
1002	 * once.
1003	 */
1004	prnt("Sleep locks:\n");
1005	witness_ddb_display_list(prnt, &w_sleep);
1006	if (db_pager_quit)
1007		return;
1008
1009	/*
1010	 * Now do spin locks which have been acquired at least once.
1011	 */
1012	prnt("\nSpin locks:\n");
1013	witness_ddb_display_list(prnt, &w_spin);
1014	if (db_pager_quit)
1015		return;
1016
1017	/*
1018	 * Finally, any locks which have not been acquired yet.
1019	 */
1020	prnt("\nLocks which were never acquired:\n");
1021	STAILQ_FOREACH(w, &w_all, w_list) {
1022		if (w->w_file != NULL || w->w_refcount == 0)
1023			continue;
1024		prnt("%s (type: %s, depth: %d)\n", w->w_name,
1025		    w->w_class->lc_name, w->w_ddb_level);
1026		if (db_pager_quit)
1027			return;
1028	}
1029}
1030#endif /* DDB */
1031
1032int
1033witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1034{
1035
1036	if (witness_watch == -1 || panicstr != NULL)
1037		return (0);
1038
1039	/* Require locks that witness knows about. */
1040	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1041	    lock2->lo_witness == NULL)
1042		return (EINVAL);
1043
1044	mtx_assert(&w_mtx, MA_NOTOWNED);
1045	mtx_lock_spin(&w_mtx);
1046
1047	/*
1048	 * If we already have either an explicit or implied lock order that
1049	 * is the other way around, then return an error.
1050	 */
1051	if (witness_watch &&
1052	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1053		mtx_unlock_spin(&w_mtx);
1054		return (EDOOFUS);
1055	}
1056
1057	/* Try to add the new order. */
1058	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1059	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1060	itismychild(lock1->lo_witness, lock2->lo_witness);
1061	mtx_unlock_spin(&w_mtx);
1062	return (0);
1063}
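
/*
 * Usage sketch with two hypothetical mutexes a and b: a subsystem that
 * knows a must always be taken before b can declare the order explicitly:
 *
 *	error = witness_defineorder(&a.lock_object, &b.lock_object);
 *
 * The call returns 0 on success, EINVAL if either lock is unknown to
 * witness, and EDOOFUS if the reverse order is already established.
 */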
1064
1065void
1066witness_checkorder(struct lock_object *lock, int flags, const char *file,
1067    int line, struct lock_object *interlock)
1068{
1069	struct lock_list_entry *lock_list, *lle;
1070	struct lock_instance *lock1, *lock2, *plock;
1071	struct lock_class *class, *iclass;
1072	struct witness *w, *w1;
1073	struct thread *td;
1074	int i, j;
1075
1076	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1077	    panicstr != NULL)
1078		return;
1079
1080	w = lock->lo_witness;
1081	class = LOCK_CLASS(lock);
1082	td = curthread;
1083
1084	if (class->lc_flags & LC_SLEEPLOCK) {
1085
1086		/*
1087		 * Since spin locks include a critical section, this check
1088		 * implicitly enforces a lock order of all sleep locks before
1089		 * all spin locks.
1090		 */
1091		if (td->td_critnest != 0 && !kdb_active)
1092			kassert_panic("acquiring blockable sleep lock with "
1093			    "spinlock or critical section held (%s) %s @ %s:%d",
1094			    class->lc_name, lock->lo_name,
1095			    fixup_filename(file), line);
1096
1097		/*
1098		 * If this is the first lock acquired then just return as
1099		 * no order checking is needed.
1100		 */
1101		lock_list = td->td_sleeplocks;
1102		if (lock_list == NULL || lock_list->ll_count == 0)
1103			return;
1104	} else {
1105
1106		/*
1107		 * checking is needed.  Avoid problems with thread
1108		 * migration by pinning the thread while checking whether
1109		 * spinlocks are held.  If at least one spinlock is held,
1110		 * the thread is on a safe path and it is allowed to be
1111		 * unpinned.
1112		 * unpin it.
1113		 */
1114		sched_pin();
1115		lock_list = PCPU_GET(spinlocks);
1116		if (lock_list == NULL || lock_list->ll_count == 0) {
1117			sched_unpin();
1118			return;
1119		}
1120		sched_unpin();
1121	}
1122
1123	/*
1124	 * Check to see if we are recursing on a lock we already own.  If
1125	 * so, make sure that we don't mismatch exclusive and shared lock
1126	 * acquires.
1127	 */
1128	lock1 = find_instance(lock_list, lock);
1129	if (lock1 != NULL) {
1130		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1131		    (flags & LOP_EXCLUSIVE) == 0) {
1132			witness_output("shared lock of (%s) %s @ %s:%d\n",
1133			    class->lc_name, lock->lo_name,
1134			    fixup_filename(file), line);
1135			witness_output("while exclusively locked from %s:%d\n",
1136			    fixup_filename(lock1->li_file), lock1->li_line);
1137			kassert_panic("excl->share");
1138		}
1139		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1140		    (flags & LOP_EXCLUSIVE) != 0) {
1141			witness_output("exclusive lock of (%s) %s @ %s:%d\n",
1142			    class->lc_name, lock->lo_name,
1143			    fixup_filename(file), line);
1144			witness_output("while share locked from %s:%d\n",
1145			    fixup_filename(lock1->li_file), lock1->li_line);
1146			kassert_panic("share->excl");
1147		}
1148		return;
1149	}
1150
1151	/* Warn if the interlock is not locked exactly once. */
1152	if (interlock != NULL) {
1153		iclass = LOCK_CLASS(interlock);
1154		lock1 = find_instance(lock_list, interlock);
1155		if (lock1 == NULL)
1156			kassert_panic("interlock (%s) %s not locked @ %s:%d",
1157			    iclass->lc_name, interlock->lo_name,
1158			    fixup_filename(file), line);
1159		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
1160			kassert_panic("interlock (%s) %s recursed @ %s:%d",
1161			    iclass->lc_name, interlock->lo_name,
1162			    fixup_filename(file), line);
1163	}
1164
1165	/*
1166	 * Find the previously acquired lock, but ignore interlocks.
1167	 */
1168	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1169	if (interlock != NULL && plock->li_lock == interlock) {
1170		if (lock_list->ll_count > 1)
1171			plock =
1172			    &lock_list->ll_children[lock_list->ll_count - 2];
1173		else {
1174			lle = lock_list->ll_next;
1175
1176			/*
1177			 * The interlock is the only lock we hold, so
1178			 * simply return.
1179			 */
1180			if (lle == NULL)
1181				return;
1182			plock = &lle->ll_children[lle->ll_count - 1];
1183		}
1184	}
1185
1186	/*
1187	 * Try to perform most checks without a lock.  If this succeeds we
1188	 * can skip acquiring the lock and return success.  Otherwise we redo
1189	 * the check with the lock held to handle races with concurrent updates.
1190	 */
1191	w1 = plock->li_lock->lo_witness;
1192	if (witness_lock_order_check(w1, w))
1193		return;
1194
1195	mtx_lock_spin(&w_mtx);
1196	if (witness_lock_order_check(w1, w)) {
1197		mtx_unlock_spin(&w_mtx);
1198		return;
1199	}
1200	witness_lock_order_add(w1, w);
1201
1202	/*
1203	 * Check for duplicate locks of the same type.  Note that we only
1204	 * have to check for this on the last lock we just acquired.  Any
1205	 * other cases will be caught as lock order violations.
1206	 */
1207	if (w1 == w) {
1208		i = w->w_index;
1209		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1210		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1211			w_rmatrix[i][i] |= WITNESS_REVERSAL;
1212			w->w_reversed = 1;
1213			mtx_unlock_spin(&w_mtx);
1214			witness_output(
1215			    "acquiring duplicate lock of same type: \"%s\"\n",
1216			    w->w_name);
1217			witness_output(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1218			    fixup_filename(plock->li_file), plock->li_line);
1219			witness_output(" 2nd %s @ %s:%d\n", lock->lo_name,
1220			    fixup_filename(file), line);
1221			witness_debugger(1, __func__);
1222		} else
1223			mtx_unlock_spin(&w_mtx);
1224		return;
1225	}
1226	mtx_assert(&w_mtx, MA_OWNED);
1227
1228	/*
1229	 * If we know that the lock we are acquiring comes after
1230	 * the lock we most recently acquired in the lock order tree,
1231	 * then there is no need for any further checks.
1232	 */
1233	if (isitmychild(w1, w))
1234		goto out;
1235
1236	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1237		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1238
1239			MPASS(j < LOCK_CHILDCOUNT * LOCK_NCHILDREN);
1240			lock1 = &lle->ll_children[i];
1241
1242			/*
1243			 * Ignore the interlock.
1244			 */
1245			if (interlock == lock1->li_lock)
1246				continue;
1247
1248			/*
1249			 * If this lock doesn't undergo witness checking,
1250			 * then skip it.
1251			 */
1252			w1 = lock1->li_lock->lo_witness;
1253			if (w1 == NULL) {
1254				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1255				    ("lock missing witness structure"));
1256				continue;
1257			}
1258
1259			/*
1260			 * If we are locking Giant and this is a sleepable
1261			 * lock, then skip it.
1262			 */
1263			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1264			    lock == &Giant.lock_object)
1265				continue;
1266
1267			/*
1268			 * If we are locking a sleepable lock and this lock
1269			 * is Giant, then skip it.
1270			 */
1271			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1272			    lock1->li_lock == &Giant.lock_object)
1273				continue;
1274
1275			/*
1276			 * If we are locking a sleepable lock and this lock
1277			 * isn't sleepable, we want to treat it as a lock
1278			 * order violation to enforce a general lock order of
1279			 * sleepable locks before non-sleepable locks.
1280			 */
1281			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1282			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1283				goto reversal;
1284
1285			/*
1286			 * If we are locking Giant and this is a non-sleepable
1287			 * lock, then treat it as a reversal.
1288			 */
1289			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1290			    lock == &Giant.lock_object)
1291				goto reversal;
1292
1293			/*
1294			 * Check the lock order hierarchy for a reversal.
1295			 */
1296			if (!isitmydescendant(w, w1))
1297				continue;
1298		reversal:
1299
1300			/*
1301			 * We have a lock order violation, check to see if it
1302			 * is allowed or has already been yelled about.
1303			 */
1304#ifdef BLESSING
1305
1306			/*
1307			 * If the lock order is blessed, just bail.  We don't
1308			 * look for other lock order violations though, which
1309			 * may be a bug.
1310			 */
1311			if (blessed(w, w1))
1312				goto out;
1313#endif
1314
1315			/* Bail if this violation is known */
1316			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1317				goto out;
1318
1319			/* Record this as a violation */
1320			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1321			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1322			w->w_reversed = w1->w_reversed = 1;
1323			witness_increment_graph_generation();
1324			mtx_unlock_spin(&w_mtx);
1325
1326#ifdef WITNESS_NO_VNODE
1327			/*
1328			 * There are known LORs between VNODE locks. They are
1329			 * not an indication of a bug. VNODE locks are flagged
1330			 * as such (LO_IS_VNODE) and we don't yell if the LOR
1331			 * is between 2 VNODE locks.
1332			 */
1333			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
1334			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
1335				return;
1336#endif
1337
1338			/*
1339			 * Ok, yell about it.
1340			 */
1341			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1342			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1343				witness_output(
1344		"lock order reversal: (sleepable after non-sleepable)\n");
1345			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1346			    && lock == &Giant.lock_object)
1347				witness_output(
1348		"lock order reversal: (Giant after non-sleepable)\n");
1349			else
1350				witness_output("lock order reversal:\n");
1351
1352			/*
1353			 * Try to locate an earlier lock with
1354			 * witness w in our list.
1355			 */
1356			do {
1357				lock2 = &lle->ll_children[i];
1358				MPASS(lock2->li_lock != NULL);
1359				if (lock2->li_lock->lo_witness == w)
1360					break;
1361				if (i == 0 && lle->ll_next != NULL) {
1362					lle = lle->ll_next;
1363					i = lle->ll_count - 1;
1364					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1365				} else
1366					i--;
1367			} while (i >= 0);
1368			if (i < 0) {
1369				witness_output(" 1st %p %s (%s) @ %s:%d\n",
1370				    lock1->li_lock, lock1->li_lock->lo_name,
1371				    w1->w_name, fixup_filename(lock1->li_file),
1372				    lock1->li_line);
1373				witness_output(" 2nd %p %s (%s) @ %s:%d\n", lock,
1374				    lock->lo_name, w->w_name,
1375				    fixup_filename(file), line);
1376			} else {
1377				witness_output(" 1st %p %s (%s) @ %s:%d\n",
1378				    lock2->li_lock, lock2->li_lock->lo_name,
1379				    lock2->li_lock->lo_witness->w_name,
1380				    fixup_filename(lock2->li_file),
1381				    lock2->li_line);
1382				witness_output(" 2nd %p %s (%s) @ %s:%d\n",
1383				    lock1->li_lock, lock1->li_lock->lo_name,
1384				    w1->w_name, fixup_filename(lock1->li_file),
1385				    lock1->li_line);
1386				witness_output(" 3rd %p %s (%s) @ %s:%d\n", lock,
1387				    lock->lo_name, w->w_name,
1388				    fixup_filename(file), line);
1389			}
1390			witness_debugger(1, __func__);
1391			return;
1392		}
1393	}
1394
1395	/*
1396	 * If requested, build a new lock order.  However, don't build a new
1397	 * relationship between a sleepable lock and Giant if it is in the
1398	 * wrong direction.  The correct lock order is that sleepable locks
1399	 * always come before Giant.
1400	 */
1401	if (flags & LOP_NEWORDER &&
1402	    !(plock->li_lock == &Giant.lock_object &&
1403	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1404		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1405		    w->w_name, plock->li_lock->lo_witness->w_name);
1406		itismychild(plock->li_lock->lo_witness, w);
1407	}
1408out:
1409	mtx_unlock_spin(&w_mtx);
1410}
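
/*
 * A reversal caught above is reported through witness_output() in roughly
 * the following shape (addresses, names and locations invented purely for
 * illustration):
 *
 *	lock order reversal:
 *	 1st 0xfffff80012345678 bufwait (bufwait) @ kern/vfs_bio.c:123
 *	 2nd 0xfffff80087654321 vnode interlock (vnode interlock) @ kern/vfs_subr.c:456
 *
 * (a 3rd line is printed when an earlier lock with the same witness as the
 * new lock is also held), after which witness_debugger() may print a stack
 * trace or drop into the debugger depending on the witness_trace and
 * witness_kdb knobs.
 */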
1411
1412void
1413witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1414{
1415	struct lock_list_entry **lock_list, *lle;
1416	struct lock_instance *instance;
1417	struct witness *w;
1418	struct thread *td;
1419
1420	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1421	    panicstr != NULL)
1422		return;
1423	w = lock->lo_witness;
1424	td = curthread;
1425
1426	/* Determine lock list for this lock. */
1427	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1428		lock_list = &td->td_sleeplocks;
1429	else
1430		lock_list = PCPU_PTR(spinlocks);
1431
1432	/* Check to see if we are recursing on a lock we already own. */
1433	instance = find_instance(*lock_list, lock);
1434	if (instance != NULL) {
1435		instance->li_flags++;
1436		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1437		    td->td_proc->p_pid, lock->lo_name,
1438		    instance->li_flags & LI_RECURSEMASK);
1439		instance->li_file = file;
1440		instance->li_line = line;
1441		return;
1442	}
1443
1444	/* Update per-witness last file and line acquire. */
1445	w->w_file = file;
1446	w->w_line = line;
1447
1448	/* Find the next open lock instance in the list and fill it. */
1449	lle = *lock_list;
1450	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1451		lle = witness_lock_list_get();
1452		if (lle == NULL)
1453			return;
1454		lle->ll_next = *lock_list;
1455		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1456		    td->td_proc->p_pid, lle);
1457		*lock_list = lle;
1458	}
1459	instance = &lle->ll_children[lle->ll_count++];
1460	instance->li_lock = lock;
1461	instance->li_line = line;
1462	instance->li_file = file;
1463	if ((flags & LOP_EXCLUSIVE) != 0)
1464		instance->li_flags = LI_EXCLUSIVE;
1465	else
1466		instance->li_flags = 0;
1467	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1468	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1469}
1470
1471void
1472witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1473{
1474	struct lock_instance *instance;
1475	struct lock_class *class;
1476
1477	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1478	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1479		return;
1480	class = LOCK_CLASS(lock);
1481	if (witness_watch) {
1482		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1483			kassert_panic(
1484			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
1485			    class->lc_name, lock->lo_name,
1486			    fixup_filename(file), line);
1487		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1488			kassert_panic(
1489			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
1490			    class->lc_name, lock->lo_name,
1491			    fixup_filename(file), line);
1492	}
1493	instance = find_instance(curthread->td_sleeplocks, lock);
1494	if (instance == NULL) {
1495		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1496		    class->lc_name, lock->lo_name,
1497		    fixup_filename(file), line);
1498		return;
1499	}
1500	if (witness_watch) {
1501		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1502			kassert_panic(
1503			    "upgrade of exclusive lock (%s) %s @ %s:%d",
1504			    class->lc_name, lock->lo_name,
1505			    fixup_filename(file), line);
1506		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1507			kassert_panic(
1508			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1509			    class->lc_name, lock->lo_name,
1510			    instance->li_flags & LI_RECURSEMASK,
1511			    fixup_filename(file), line);
1512	}
1513	instance->li_flags |= LI_EXCLUSIVE;
1514}
1515
1516void
1517witness_downgrade(struct lock_object *lock, int flags, const char *file,
1518    int line)
1519{
1520	struct lock_instance *instance;
1521	struct lock_class *class;
1522
1523	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1524	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1525		return;
1526	class = LOCK_CLASS(lock);
1527	if (witness_watch) {
1528		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1529			kassert_panic(
1530			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
1531			    class->lc_name, lock->lo_name,
1532			    fixup_filename(file), line);
1533		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1534			kassert_panic(
1535			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
1536			    class->lc_name, lock->lo_name,
1537			    fixup_filename(file), line);
1538	}
1539	instance = find_instance(curthread->td_sleeplocks, lock);
1540	if (instance == NULL) {
1541		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1542		    class->lc_name, lock->lo_name,
1543		    fixup_filename(file), line);
1544		return;
1545	}
1546	if (witness_watch) {
1547		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1548			kassert_panic(
1549			    "downgrade of shared lock (%s) %s @ %s:%d",
1550			    class->lc_name, lock->lo_name,
1551			    fixup_filename(file), line);
1552		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1553			kassert_panic(
1554			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1555			    class->lc_name, lock->lo_name,
1556			    instance->li_flags & LI_RECURSEMASK,
1557			    fixup_filename(file), line);
1558	}
1559	instance->li_flags &= ~LI_EXCLUSIVE;
1560}
1561
1562void
1563witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1564{
1565	struct lock_list_entry **lock_list, *lle;
1566	struct lock_instance *instance;
1567	struct lock_class *class;
1568	struct thread *td;
1569	register_t s;
1570	int i, j;
1571
1572	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1573		return;
1574	td = curthread;
1575	class = LOCK_CLASS(lock);
1576
1577	/* Find lock instance associated with this lock. */
1578	if (class->lc_flags & LC_SLEEPLOCK)
1579		lock_list = &td->td_sleeplocks;
1580	else
1581		lock_list = PCPU_PTR(spinlocks);
1582	lle = *lock_list;
1583	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1584		for (i = 0; i < (*lock_list)->ll_count; i++) {
1585			instance = &(*lock_list)->ll_children[i];
1586			if (instance->li_lock == lock)
1587				goto found;
1588		}
1589
1590	/*
1591	 * When WITNESS is disabled via witness_watch we can end up with
1592	 * registered locks still present in the td_sleeplocks queue.
1593	 * We have to make sure these queues are flushed, so just search
1594	 * for any such leftover locks and remove them.
1595	 */
1596	if (witness_watch > 0) {
1597		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1598		    lock->lo_name, fixup_filename(file), line);
1599		return;
1600	} else {
1601		return;
1602	}
1603found:
1604
1605	/* First, check for shared/exclusive mismatches. */
1606	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1607	    (flags & LOP_EXCLUSIVE) == 0) {
1608		witness_output("shared unlock of (%s) %s @ %s:%d\n",
1609		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1610		witness_output("while exclusively locked from %s:%d\n",
1611		    fixup_filename(instance->li_file), instance->li_line);
1612		kassert_panic("excl->ushare");
1613	}
1614	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1615	    (flags & LOP_EXCLUSIVE) != 0) {
1616		witness_output("exclusive unlock of (%s) %s @ %s:%d\n",
1617		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1618		witness_output("while share locked from %s:%d\n",
1619		    fixup_filename(instance->li_file),
1620		    instance->li_line);
1621		kassert_panic("share->uexcl");
1622	}
1623	/* If we are recursed, unrecurse. */
1624	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1625		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1626		    td->td_proc->p_pid, instance->li_lock->lo_name,
1627		    instance->li_flags);
1628		instance->li_flags--;
1629		return;
1630	}
1631	/* The lock is now being dropped, check for NORELEASE flag */
1632	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1633		witness_output("forbidden unlock of (%s) %s @ %s:%d\n",
1634		    class->lc_name, lock->lo_name, fixup_filename(file), line);
1635		kassert_panic("lock marked norelease");
1636	}
1637
1638	/* Otherwise, remove this item from the list. */
1639	s = intr_disable();
1640	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1641	    td->td_proc->p_pid, instance->li_lock->lo_name,
1642	    (*lock_list)->ll_count - 1);
1643	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1644		(*lock_list)->ll_children[j] =
1645		    (*lock_list)->ll_children[j + 1];
1646	(*lock_list)->ll_count--;
1647	intr_restore(s);
1648
1649	/*
1650	 * In order to reduce contention on w_mtx, we always want to keep a
1651	 * head object on the list so that frequent allocation from the
1652	 * free witness pool (and the locking that goes with it) is avoided.
1653	 * To keep the code simple, a completely emptied head object also
1654	 * implies that there are no further objects in the list, so list
1655	 * ownership must be handed over to another object if the current
1656	 * head needs to be freed.
1657	 */
1658	if ((*lock_list)->ll_count == 0) {
1659		if (*lock_list == lle) {
1660			if (lle->ll_next == NULL)
1661				return;
1662		} else
1663			lle = *lock_list;
1664		*lock_list = lle->ll_next;
1665		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1666		    td->td_proc->p_pid, lle);
1667		witness_lock_list_free(lle);
1668	}
1669}
1670
1671void
1672witness_thread_exit(struct thread *td)
1673{
1674	struct lock_list_entry *lle;
1675	int i, n;
1676
1677	lle = td->td_sleeplocks;
1678	if (lle == NULL || panicstr != NULL)
1679		return;
1680	if (lle->ll_count != 0) {
1681		for (n = 0; lle != NULL; lle = lle->ll_next)
1682			for (i = lle->ll_count - 1; i >= 0; i--) {
1683				if (n == 0)
1684					witness_output(
1685		    "Thread %p exiting with the following locks held:\n", td);
1686				n++;
1687				witness_list_lock(&lle->ll_children[i],
1688				    witness_output);
1689
1690			}
1691		kassert_panic(
1692		    "Thread %p cannot exit while holding sleeplocks\n", td);
1693	}
1694	witness_lock_list_free(lle);
1695}
1696
1697/*
1698 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1699 * exempt Giant and sleepable locks from the checks as well.  If any
1700 * non-exempt locks are held, then a supplied message is printed to the
1701 * output channel along with a list of the offending locks.  If indicated in the
1702 * flags then a failure results in a panic as well.
1703 */
1704int
1705witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1706{
1707	struct lock_list_entry *lock_list, *lle;
1708	struct lock_instance *lock1;
1709	struct thread *td;
1710	va_list ap;
1711	int i, n;
1712
1713	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1714		return (0);
1715	n = 0;
1716	td = curthread;
1717	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1718		for (i = lle->ll_count - 1; i >= 0; i--) {
1719			lock1 = &lle->ll_children[i];
1720			if (lock1->li_lock == lock)
1721				continue;
1722			if (flags & WARN_GIANTOK &&
1723			    lock1->li_lock == &Giant.lock_object)
1724				continue;
1725			if (flags & WARN_SLEEPOK &&
1726			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1727				continue;
1728			if (n == 0) {
1729				va_start(ap, fmt);
1730				witness_voutput(fmt, ap);
1731				va_end(ap);
1732				witness_output(
1733				    " with the following %slocks held:\n",
1734				    (flags & WARN_SLEEPOK) != 0 ?
1735				    "non-sleepable " : "");
1736			}
1737			n++;
1738			witness_list_lock(lock1, witness_output);
1739		}
1740
1741	/*
1742	 * Pin the thread in order to avoid problems with thread migration.
1743	 * Once all the checks on spin lock ownership have passed, the
1744	 * thread is on a safe path and can be unpinned.
1745	 */
1746	sched_pin();
1747	lock_list = PCPU_GET(spinlocks);
1748	if (lock_list != NULL && lock_list->ll_count != 0) {
1749		sched_unpin();
1750
1751		/*
1752		 * We should only have one spinlock and, since the exemption
1753		 * flags cannot match this lock class, simply check whether
1754		 * the first (and only) spinlock held is the one curthread
1755		 * should hold.
1756		 */
1757		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1758		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1759		    lock1->li_lock == lock && n == 0)
1760			return (0);
1761
1762		va_start(ap, fmt);
1763		witness_voutput(fmt, ap);
1764		va_end(ap);
1765		witness_output(" with the following %slocks held:\n",
1766		    (flags & WARN_SLEEPOK) != 0 ?  "non-sleepable " : "");
1767		n += witness_list_locks(&lock_list, witness_output);
1768	} else
1769		sched_unpin();
1770	if (flags & WARN_PANIC && n)
1771		kassert_panic("%s", __func__);
1772	else
1773		witness_debugger(n, __func__);
1774	return (n);
1775}
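/*
 * Illustrative (hypothetical) caller: code about to sleep might verify
 * that nothing other than Giant and sleepable locks is held, panicking
 * otherwise:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
 *	    "sleeping in %s", __func__);
 */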
1776
1777const char *
1778witness_file(struct lock_object *lock)
1779{
1780	struct witness *w;
1781
1782	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1783		return ("?");
1784	w = lock->lo_witness;
1785	return (w->w_file);
1786}
1787
1788int
1789witness_line(struct lock_object *lock)
1790{
1791	struct witness *w;
1792
1793	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1794		return (0);
1795	w = lock->lo_witness;
1796	return (w->w_line);
1797}
1798
1799static struct witness *
1800enroll(const char *description, struct lock_class *lock_class)
1801{
1802	struct witness *w;
1803	struct witness_list *typelist;
1804
1805	MPASS(description != NULL);
1806
1807	if (witness_watch == -1 || panicstr != NULL)
1808		return (NULL);
1809	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1810		if (witness_skipspin)
1811			return (NULL);
1812		else
1813			typelist = &w_spin;
1814	} else if ((lock_class->lc_flags & LC_SLEEPLOCK)) {
1815		typelist = &w_sleep;
1816	} else {
1817		kassert_panic("lock class %s is not sleep or spin",
1818		    lock_class->lc_name);
1819		return (NULL);
1820	}
1821
1822	mtx_lock_spin(&w_mtx);
1823	w = witness_hash_get(description);
1824	if (w)
1825		goto found;
1826	if ((w = witness_get()) == NULL)
1827		return (NULL);
1828	MPASS(strlen(description) < MAX_W_NAME);
1829	strcpy(w->w_name, description);
1830	w->w_class = lock_class;
1831	w->w_refcount = 1;
1832	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1833	if (lock_class->lc_flags & LC_SPINLOCK) {
1834		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1835		w_spin_cnt++;
1836	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1837		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1838		w_sleep_cnt++;
1839	}
1840
1841	/* Insert new witness into the hash */
1842	witness_hash_put(w);
1843	witness_increment_graph_generation();
1844	mtx_unlock_spin(&w_mtx);
1845	return (w);
1846found:
1847	w->w_refcount++;
1848	mtx_unlock_spin(&w_mtx);
1849	if (lock_class != w->w_class)
1850		kassert_panic(
1851			"lock (%s) %s does not match earlier (%s) lock",
1852			description, lock_class->lc_name,
1853			w->w_class->lc_name);
1854	return (w);
1855}
1856
1857static void
1858depart(struct witness *w)
1859{
1860	struct witness_list *list;
1861
1862	MPASS(w->w_refcount == 0);
1863	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1864		list = &w_sleep;
1865		w_sleep_cnt--;
1866	} else {
1867		list = &w_spin;
1868		w_spin_cnt--;
1869	}
1870	/*
1871	 * Set file to NULL as it may point into a loadable module.
1872	 */
1873	w->w_file = NULL;
1874	w->w_line = 0;
1875	witness_increment_graph_generation();
1876}
1877
1878
1879static void
1880adopt(struct witness *parent, struct witness *child)
1881{
1882	int pi, ci, i, j;
1883
1884	if (witness_cold == 0)
1885		mtx_assert(&w_mtx, MA_OWNED);
1886
1887	/* If the relationship is already known, there's no work to be done. */
1888	if (isitmychild(parent, child))
1889		return;
1890
1891	/* When the structure of the graph changes, bump up the generation. */
1892	witness_increment_graph_generation();
1893
1894	/*
1895	 * The hard part ... create the direct relationship, then propagate all
1896	 * indirect relationships.
1897	 */
1898	pi = parent->w_index;
1899	ci = child->w_index;
1900	WITNESS_INDEX_ASSERT(pi);
1901	WITNESS_INDEX_ASSERT(ci);
1902	MPASS(pi != ci);
1903	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1904	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1905
1906	/*
1907	 * If parent was not already an ancestor of child,
1908	 * then we increment the descendant and ancestor counters.
1909	 */
1910	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1911		parent->w_num_descendants++;
1912		child->w_num_ancestors++;
1913	}
1914
1915	/*
1916	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1917	 * an ancestor of 'pi' during this loop.
1918	 */
1919	for (i = 1; i <= w_max_used_index; i++) {
1920		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1921		    (i != pi))
1922			continue;
1923
1924		/* Find each descendant of 'i' and mark it as a descendant. */
1925		for (j = 1; j <= w_max_used_index; j++) {
1926
1927			/*
1928			 * Skip children that are already marked as
1929			 * descendants of 'i'.
1930			 */
1931			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1932				continue;
1933
1934			/*
1935			 * We are only interested in descendants of 'ci'. Note
1936			 * that 'ci' itself is counted as a descendant of 'ci'.
1937			 */
1938			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1939			    (j != ci))
1940				continue;
1941			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1942			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1943			w_data[i].w_num_descendants++;
1944			w_data[j].w_num_ancestors++;
1945
1946			/*
1947			 * Make sure we aren't marking a node as both an
1948			 * ancestor and descendant. We should have caught
1949			 * this as a lock order reversal earlier.
1950			 */
1951			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1952			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1953				printf("witness rmatrix paradox! [%d][%d]=%d "
1954				    "both ancestor and descendant\n",
1955				    i, j, w_rmatrix[i][j]);
1956				kdb_backtrace();
1957				printf("Witness disabled.\n");
1958				witness_watch = -1;
1959			}
1960			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1961			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1962				printf("witness rmatrix paradox! [%d][%d]=%d "
1963				    "both ancestor and descendant\n",
1964				    j, i, w_rmatrix[j][i]);
1965				kdb_backtrace();
1966				printf("Witness disabled.\n");
1967				witness_watch = -1;
1968			}
1969		}
1970	}
1971}
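/*
 * A small worked example of the propagation above: suppose "a" is already
 * an ancestor of "parent" and "d" is already a descendant of "child".
 * After adopt(parent, child), w_rmatrix also marks "a" as an ancestor of
 * both "child" and "d" (and, symmetrically, marks them as descendants of
 * "a"), so a single new edge updates the whole transitive closure.
 */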
1972
1973static void
1974itismychild(struct witness *parent, struct witness *child)
1975{
1976	int unlocked;
1977
1978	MPASS(child != NULL && parent != NULL);
1979	if (witness_cold == 0)
1980		mtx_assert(&w_mtx, MA_OWNED);
1981
1982	if (!witness_lock_type_equal(parent, child)) {
1983		if (witness_cold == 0) {
1984			unlocked = 1;
1985			mtx_unlock_spin(&w_mtx);
1986		} else {
1987			unlocked = 0;
1988		}
1989		kassert_panic(
1990		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1991		    "the same lock type", __func__, parent->w_name,
1992		    parent->w_class->lc_name, child->w_name,
1993		    child->w_class->lc_name);
1994		if (unlocked)
1995			mtx_lock_spin(&w_mtx);
1996	}
1997	adopt(parent, child);
1998}
1999
2000/*
2001 * Generic code for the isitmy*() functions. The rmask parameter is the
2002 * expected relationship of w1 to w2.
2003 */
2004static int
2005_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
2006{
2007	unsigned char r1, r2;
2008	int i1, i2;
2009
2010	i1 = w1->w_index;
2011	i2 = w2->w_index;
2012	WITNESS_INDEX_ASSERT(i1);
2013	WITNESS_INDEX_ASSERT(i2);
2014	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
2015	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
2016
2017	/* The flags on one better be the inverse of the flags on the other */
2018	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
2019	    (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
2020		/* Don't squawk if we're potentially racing with an update. */
2021		if (!mtx_owned(&w_mtx))
2022			return (0);
2023		printf("%s: rmatrix mismatch between %s (index %d) and %s "
2024		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
2025		    "w_rmatrix[%d][%d] == %hhx\n",
2026		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2027		    i2, i1, r2);
2028		kdb_backtrace();
2029		printf("Witness disabled.\n");
2030		witness_watch = -1;
2031	}
2032	return (r1 & rmask);
2033}
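/*
 * The consistency check above relies on the two matrix entries being
 * mirror images of each other: if w_rmatrix[i1][i2] has WITNESS_PARENT
 * set, for example, then w_rmatrix[i2][i1] is expected to carry
 * WITNESS_CHILD, and likewise for the ancestor/descendant bits.
 */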
2034
2035/*
2036 * Checks if @child is a direct child of @parent.
2037 */
2038static int
2039isitmychild(struct witness *parent, struct witness *child)
2040{
2041
2042	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2043}
2044
2045/*
2046 * Checks if @descendant is a direct or indirect descendant of @ancestor.
2047 */
2048static int
2049isitmydescendant(struct witness *ancestor, struct witness *descendant)
2050{
2051
2052	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2053	    __func__));
2054}
2055
2056#ifdef BLESSING
2057static int
2058blessed(struct witness *w1, struct witness *w2)
2059{
2060	int i;
2061	struct witness_blessed *b;
2062
2063	for (i = 0; i < blessed_count; i++) {
2064		b = &blessed_list[i];
2065		if (strcmp(w1->w_name, b->b_lock1) == 0) {
2066			if (strcmp(w2->w_name, b->b_lock2) == 0)
2067				return (1);
2068			continue;
2069		}
2070		if (strcmp(w1->w_name, b->b_lock2) == 0)
2071			if (strcmp(w2->w_name, b->b_lock1) == 0)
2072				return (1);
2073	}
2074	return (0);
2075}
2076#endif
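/*
 * A "blessed" pair names two witnesses whose lock order reversals are
 * deliberately tolerated; blessed() matches the pair against blessed_list[]
 * in either order.  This is only compiled in when the BLESSING option is
 * defined.
 */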
2077
2078static struct witness *
2079witness_get(void)
2080{
2081	struct witness *w;
2082	int index;
2083
2084	if (witness_cold == 0)
2085		mtx_assert(&w_mtx, MA_OWNED);
2086
2087	if (witness_watch == -1) {
2088		mtx_unlock_spin(&w_mtx);
2089		return (NULL);
2090	}
2091	if (STAILQ_EMPTY(&w_free)) {
2092		witness_watch = -1;
2093		mtx_unlock_spin(&w_mtx);
2094		printf("WITNESS: unable to allocate a new witness object\n");
2095		return (NULL);
2096	}
2097	w = STAILQ_FIRST(&w_free);
2098	STAILQ_REMOVE_HEAD(&w_free, w_list);
2099	w_free_cnt--;
2100	index = w->w_index;
2101	MPASS(index > 0 && index == w_max_used_index+1 &&
2102	    index < witness_count);
2103	bzero(w, sizeof(*w));
2104	w->w_index = index;
2105	if (index > w_max_used_index)
2106		w_max_used_index = index;
2107	return (w);
2108}
2109
2110static void
2111witness_free(struct witness *w)
2112{
2113
2114	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2115	w_free_cnt++;
2116}
2117
2118static struct lock_list_entry *
2119witness_lock_list_get(void)
2120{
2121	struct lock_list_entry *lle;
2122
2123	if (witness_watch == -1)
2124		return (NULL);
2125	mtx_lock_spin(&w_mtx);
2126	lle = w_lock_list_free;
2127	if (lle == NULL) {
2128		witness_watch = -1;
2129		mtx_unlock_spin(&w_mtx);
2130		printf("%s: witness exhausted\n", __func__);
2131		return (NULL);
2132	}
2133	w_lock_list_free = lle->ll_next;
2134	mtx_unlock_spin(&w_mtx);
2135	bzero(lle, sizeof(*lle));
2136	return (lle);
2137}
2138
2139static void
2140witness_lock_list_free(struct lock_list_entry *lle)
2141{
2142
2143	mtx_lock_spin(&w_mtx);
2144	lle->ll_next = w_lock_list_free;
2145	w_lock_list_free = lle;
2146	mtx_unlock_spin(&w_mtx);
2147}
2148
2149static struct lock_instance *
2150find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2151{
2152	struct lock_list_entry *lle;
2153	struct lock_instance *instance;
2154	int i;
2155
2156	for (lle = list; lle != NULL; lle = lle->ll_next)
2157		for (i = lle->ll_count - 1; i >= 0; i--) {
2158			instance = &lle->ll_children[i];
2159			if (instance->li_lock == lock)
2160				return (instance);
2161		}
2162	return (NULL);
2163}
2164
2165static void
2166witness_list_lock(struct lock_instance *instance,
2167    int (*prnt)(const char *fmt, ...))
2168{
2169	struct lock_object *lock;
2170
2171	lock = instance->li_lock;
2172	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2173	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2174	if (lock->lo_witness->w_name != lock->lo_name)
2175		prnt(" (%s)", lock->lo_witness->w_name);
2176	prnt(" r = %d (%p) locked @ %s:%d\n",
2177	    instance->li_flags & LI_RECURSEMASK, lock,
2178	    fixup_filename(instance->li_file), instance->li_line);
2179}
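/*
 * The line printed above looks roughly like this (hypothetical values,
 * wrapped here for readability; the parenthesized witness name is only
 * printed when it differs from the lock name):
 *
 *	exclusive sleep mutex somelock (shared witness name) r = 0
 *	    (0xdeadc0de) locked @ kern/some_file.c:123
 */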
2180
2181static int
2182witness_output(const char *fmt, ...)
2183{
2184	va_list ap;
2185	int ret;
2186
2187	va_start(ap, fmt);
2188	ret = witness_voutput(fmt, ap);
2189	va_end(ap);
2190	return (ret);
2191}
2192
2193static int
2194witness_voutput(const char *fmt, va_list ap)
2195{
2196	int ret;
2197
2198	ret = 0;
2199	switch (witness_channel) {
2200	case WITNESS_CONSOLE:
2201		ret = vprintf(fmt, ap);
2202		break;
2203	case WITNESS_LOG:
2204		vlog(LOG_NOTICE, fmt, ap);
2205		break;
2206	case WITNESS_NONE:
2207		break;
2208	}
2209	return (ret);
2210}
2211
2212#ifdef DDB
2213static int
2214witness_thread_has_locks(struct thread *td)
2215{
2216
2217	if (td->td_sleeplocks == NULL)
2218		return (0);
2219	return (td->td_sleeplocks->ll_count != 0);
2220}
2221
2222static int
2223witness_proc_has_locks(struct proc *p)
2224{
2225	struct thread *td;
2226
2227	FOREACH_THREAD_IN_PROC(p, td) {
2228		if (witness_thread_has_locks(td))
2229			return (1);
2230	}
2231	return (0);
2232}
2233#endif
2234
2235int
2236witness_list_locks(struct lock_list_entry **lock_list,
2237    int (*prnt)(const char *fmt, ...))
2238{
2239	struct lock_list_entry *lle;
2240	int i, nheld;
2241
2242	nheld = 0;
2243	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2244		for (i = lle->ll_count - 1; i >= 0; i--) {
2245			witness_list_lock(&lle->ll_children[i], prnt);
2246			nheld++;
2247		}
2248	return (nheld);
2249}
2250
2251/*
2252 * This is a bit risky at best.  We call this function when we have timed
2253 * out acquiring a spin lock, and we assume that the other CPU is stuck
2254 * with this lock held.  So, we go groveling around in the other CPU's
2255 * per-cpu data to try to find the lock instance for this spin lock to
2256 * see when it was last acquired.
2257 */
2258void
2259witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2260    int (*prnt)(const char *fmt, ...))
2261{
2262	struct lock_instance *instance;
2263	struct pcpu *pc;
2264
2265	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2266		return;
2267	pc = pcpu_find(owner->td_oncpu);
2268	instance = find_instance(pc->pc_spinlocks, lock);
2269	if (instance != NULL)
2270		witness_list_lock(instance, prnt);
2271}
2272
2273void
2274witness_save(struct lock_object *lock, const char **filep, int *linep)
2275{
2276	struct lock_list_entry *lock_list;
2277	struct lock_instance *instance;
2278	struct lock_class *class;
2279
2280	/*
2281	 * This function is used independently in locking code to deal with
2282	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2283	 * Giant is gone.
2284	 */
2285	if (SCHEDULER_STOPPED())
2286		return;
2287	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2288	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2289		return;
2290	class = LOCK_CLASS(lock);
2291	if (class->lc_flags & LC_SLEEPLOCK)
2292		lock_list = curthread->td_sleeplocks;
2293	else {
2294		if (witness_skipspin)
2295			return;
2296		lock_list = PCPU_GET(spinlocks);
2297	}
2298	instance = find_instance(lock_list, lock);
2299	if (instance == NULL) {
2300		kassert_panic("%s: lock (%s) %s not locked", __func__,
2301		    class->lc_name, lock->lo_name);
2302		return;
2303	}
2304	*filep = instance->li_file;
2305	*linep = instance->li_line;
2306}
2307
2308void
2309witness_restore(struct lock_object *lock, const char *file, int line)
2310{
2311	struct lock_list_entry *lock_list;
2312	struct lock_instance *instance;
2313	struct lock_class *class;
2314
2315	/*
2316	 * This function is used independently in locking code to deal with
2317	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2318	 * Giant is gone.
2319	 */
2320	if (SCHEDULER_STOPPED())
2321		return;
2322	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2323	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2324		return;
2325	class = LOCK_CLASS(lock);
2326	if (class->lc_flags & LC_SLEEPLOCK)
2327		lock_list = curthread->td_sleeplocks;
2328	else {
2329		if (witness_skipspin)
2330			return;
2331		lock_list = PCPU_GET(spinlocks);
2332	}
2333	instance = find_instance(lock_list, lock);
2334	if (instance == NULL)
2335		kassert_panic("%s: lock (%s) %s not locked", __func__,
2336		    class->lc_name, lock->lo_name);
2337	lock->lo_witness->w_file = file;
2338	lock->lo_witness->w_line = line;
2339	if (instance == NULL)
2340		return;
2341	instance->li_file = file;
2342	instance->li_line = line;
2343}
2344
2345void
2346witness_assert(const struct lock_object *lock, int flags, const char *file,
2347    int line)
2348{
2349#ifdef INVARIANT_SUPPORT
2350	struct lock_instance *instance;
2351	struct lock_class *class;
2352
2353	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2354		return;
2355	class = LOCK_CLASS(lock);
2356	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2357		instance = find_instance(curthread->td_sleeplocks, lock);
2358	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2359		instance = find_instance(PCPU_GET(spinlocks), lock);
2360	else {
2361		kassert_panic("Lock (%s) %s is not sleep or spin!",
2362		    class->lc_name, lock->lo_name);
2363		return;
2364	}
2365	switch (flags) {
2366	case LA_UNLOCKED:
2367		if (instance != NULL)
2368			kassert_panic("Lock (%s) %s locked @ %s:%d.",
2369			    class->lc_name, lock->lo_name,
2370			    fixup_filename(file), line);
2371		break;
2372	case LA_LOCKED:
2373	case LA_LOCKED | LA_RECURSED:
2374	case LA_LOCKED | LA_NOTRECURSED:
2375	case LA_SLOCKED:
2376	case LA_SLOCKED | LA_RECURSED:
2377	case LA_SLOCKED | LA_NOTRECURSED:
2378	case LA_XLOCKED:
2379	case LA_XLOCKED | LA_RECURSED:
2380	case LA_XLOCKED | LA_NOTRECURSED:
2381		if (instance == NULL) {
2382			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2383			    class->lc_name, lock->lo_name,
2384			    fixup_filename(file), line);
2385			break;
2386		}
2387		if ((flags & LA_XLOCKED) != 0 &&
2388		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2389			kassert_panic(
2390			    "Lock (%s) %s not exclusively locked @ %s:%d.",
2391			    class->lc_name, lock->lo_name,
2392			    fixup_filename(file), line);
2393		if ((flags & LA_SLOCKED) != 0 &&
2394		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2395			kassert_panic(
2396			    "Lock (%s) %s exclusively locked @ %s:%d.",
2397			    class->lc_name, lock->lo_name,
2398			    fixup_filename(file), line);
2399		if ((flags & LA_RECURSED) != 0 &&
2400		    (instance->li_flags & LI_RECURSEMASK) == 0)
2401			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2402			    class->lc_name, lock->lo_name,
2403			    fixup_filename(file), line);
2404		if ((flags & LA_NOTRECURSED) != 0 &&
2405		    (instance->li_flags & LI_RECURSEMASK) != 0)
2406			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2407			    class->lc_name, lock->lo_name,
2408			    fixup_filename(file), line);
2409		break;
2410	default:
2411		kassert_panic("Invalid lock assertion at %s:%d.",
2412		    fixup_filename(file), line);
2413
2414	}
2415#endif	/* INVARIANT_SUPPORT */
2416}
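/*
 * Hypothetical example of an assertion a caller might make, checking that
 * it holds "lk" exclusively at this point:
 *
 *	witness_assert(&lk->lock_object, LA_XLOCKED, __FILE__, __LINE__);
 */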
2417
2418static void
2419witness_setflag(struct lock_object *lock, int flag, int set)
2420{
2421	struct lock_list_entry *lock_list;
2422	struct lock_instance *instance;
2423	struct lock_class *class;
2424
2425	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2426		return;
2427	class = LOCK_CLASS(lock);
2428	if (class->lc_flags & LC_SLEEPLOCK)
2429		lock_list = curthread->td_sleeplocks;
2430	else {
2431		if (witness_skipspin)
2432			return;
2433		lock_list = PCPU_GET(spinlocks);
2434	}
2435	instance = find_instance(lock_list, lock);
2436	if (instance == NULL) {
2437		kassert_panic("%s: lock (%s) %s not locked", __func__,
2438		    class->lc_name, lock->lo_name);
2439		return;
2440	}
2441
2442	if (set)
2443		instance->li_flags |= flag;
2444	else
2445		instance->li_flags &= ~flag;
2446}
2447
2448void
2449witness_norelease(struct lock_object *lock)
2450{
2451
2452	witness_setflag(lock, LI_NORELEASE, 1);
2453}
2454
2455void
2456witness_releaseok(struct lock_object *lock)
2457{
2458
2459	witness_setflag(lock, LI_NORELEASE, 0);
2460}
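/*
 * A hypothetical caller could use these to flag a region in which dropping
 * a lock would be a bug:
 *
 *	witness_norelease(&m->lock_object);
 *	function_that_must_not_drop_m();
 *	witness_releaseok(&m->lock_object);
 *
 * An unlock between the two calls trips the LI_NORELEASE check in the
 * unlock path above.
 */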
2461
2462#ifdef DDB
2463static void
2464witness_ddb_list(struct thread *td)
2465{
2466
2467	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2468	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2469
2470	if (witness_watch < 1)
2471		return;
2472
2473	witness_list_locks(&td->td_sleeplocks, db_printf);
2474
2475	/*
2476	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2477	 * if td is currently executing on some other CPU and holds spin locks
2478	 * as we won't display those locks.  If we had a MI way of getting
2479	 * the per-cpu data for a given cpu then we could use
2480	 * td->td_oncpu to get the list of spinlocks for this thread
2481	 * and "fix" this.
2482	 *
2483	 * That still wouldn't really fix this unless we locked the scheduler
2484	 * lock or stopped the other CPU to make sure it wasn't changing the
2485	 * list out from under us.  It is probably best to just not try to
2486	 * handle threads on other CPU's for now.
2487	 */
2488	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2489		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2490}
2491
2492DB_SHOW_COMMAND(locks, db_witness_list)
2493{
2494	struct thread *td;
2495
2496	if (have_addr)
2497		td = db_lookup_thread(addr, true);
2498	else
2499		td = kdb_thread;
2500	witness_ddb_list(td);
2501}
2502
2503DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2504{
2505	struct thread *td;
2506	struct proc *p;
2507
2508	/*
2509	 * Only list processes and threads that actually hold sleep locks;
2510	 * witness_proc_has_locks() and witness_thread_has_locks() are used
2511	 * below to skip the rest.
2512	 */
2513	FOREACH_PROC_IN_SYSTEM(p) {
2514		if (!witness_proc_has_locks(p))
2515			continue;
2516		FOREACH_THREAD_IN_PROC(p, td) {
2517			if (!witness_thread_has_locks(td))
2518				continue;
2519			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2520			    p->p_comm, td, td->td_tid);
2521			witness_ddb_list(td);
2522			if (db_pager_quit)
2523				return;
2524		}
2525	}
2526}
2527DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2528
2529DB_SHOW_COMMAND(witness, db_witness_display)
2530{
2531
2532	witness_ddb_display(db_printf);
2533}
2534#endif
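/*
 * The DDB commands defined above are reachable from the debugger prompt as
 * "show locks [addr]", "show all locks" (alias "show alllocks") and
 * "show witness".
 */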
2535
2536static int
2537sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2538{
2539	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2540	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2541	struct sbuf *sb;
2542	u_int w_rmatrix1, w_rmatrix2;
2543	int error, generation, i, j;
2544
2545	tmp_data1 = NULL;
2546	tmp_data2 = NULL;
2547	tmp_w1 = NULL;
2548	tmp_w2 = NULL;
2549	if (witness_watch < 1) {
2550		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2551		return (error);
2552	}
2553	if (witness_cold) {
2554		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2555		return (error);
2556	}
2557	error = 0;
2558	sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
2559	if (sb == NULL)
2560		return (ENOMEM);
2561
2562	/* Allocate and init temporary storage space. */
2563	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2564	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2565	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2566	    M_WAITOK | M_ZERO);
2567	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2568	    M_WAITOK | M_ZERO);
2569	stack_zero(&tmp_data1->wlod_stack);
2570	stack_zero(&tmp_data2->wlod_stack);
2571
2572restart:
2573	mtx_lock_spin(&w_mtx);
2574	generation = w_generation;
2575	mtx_unlock_spin(&w_mtx);
2576	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2577	    w_lohash.wloh_count);
2578	for (i = 1; i < w_max_used_index; i++) {
2579		mtx_lock_spin(&w_mtx);
2580		if (generation != w_generation) {
2581			mtx_unlock_spin(&w_mtx);
2582
2583			/* The graph has changed, try again. */
2584			req->oldidx = 0;
2585			sbuf_clear(sb);
2586			goto restart;
2587		}
2588
2589		w1 = &w_data[i];
2590		if (w1->w_reversed == 0) {
2591			mtx_unlock_spin(&w_mtx);
2592			continue;
2593		}
2594
2595		/* Copy w1 locally so we can release the spin lock. */
2596		*tmp_w1 = *w1;
2597		mtx_unlock_spin(&w_mtx);
2598
2599		if (tmp_w1->w_reversed == 0)
2600			continue;
2601		for (j = 1; j < w_max_used_index; j++) {
2602			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2603				continue;
2604
2605			mtx_lock_spin(&w_mtx);
2606			if (generation != w_generation) {
2607				mtx_unlock_spin(&w_mtx);
2608
2609				/* The graph has changed, try again. */
2610				req->oldidx = 0;
2611				sbuf_clear(sb);
2612				goto restart;
2613			}
2614
2615			w2 = &w_data[j];
2616			data1 = witness_lock_order_get(w1, w2);
2617			data2 = witness_lock_order_get(w2, w1);
2618
2619			/*
2620			 * Copy information locally so we can release the
2621			 * spin lock.
2622			 */
2623			*tmp_w2 = *w2;
2624			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2625			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2626
2627			if (data1) {
2628				stack_zero(&tmp_data1->wlod_stack);
2629				stack_copy(&data1->wlod_stack,
2630				    &tmp_data1->wlod_stack);
2631			}
2632			if (data2 && data2 != data1) {
2633				stack_zero(&tmp_data2->wlod_stack);
2634				stack_copy(&data2->wlod_stack,
2635				    &tmp_data2->wlod_stack);
2636			}
2637			mtx_unlock_spin(&w_mtx);
2638
2639			sbuf_printf(sb,
2640	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2641			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2642			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2643			if (data1) {
2644				sbuf_printf(sb,
2645			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2646				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2647				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2648				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2649				sbuf_printf(sb, "\n");
2650			}
2651			if (data2 && data2 != data1) {
2652				sbuf_printf(sb,
2653			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2654				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2655				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2656				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2657				sbuf_printf(sb, "\n");
2658			}
2659		}
2660	}
2661	mtx_lock_spin(&w_mtx);
2662	if (generation != w_generation) {
2663		mtx_unlock_spin(&w_mtx);
2664
2665		/*
2666		 * The graph changed while we were printing stack data,
2667		 * try again.
2668		 */
2669		req->oldidx = 0;
2670		sbuf_clear(sb);
2671		goto restart;
2672	}
2673	mtx_unlock_spin(&w_mtx);
2674
2675	/* Free temporary storage space. */
2676	free(tmp_data1, M_TEMP);
2677	free(tmp_data2, M_TEMP);
2678	free(tmp_w1, M_TEMP);
2679	free(tmp_w2, M_TEMP);
2680
2681	sbuf_finish(sb);
2682	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2683	sbuf_delete(sb);
2684
2685	return (error);
2686}
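/*
 * This handler backs a read-only sysctl (registered elsewhere in this
 * file, presumably as debug.witness.badstacks) that dumps, for every
 * reversed pair of witnesses, the stack traces recorded when each lock
 * order was first seen.
 */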
2687
2688static int
2689sysctl_debug_witness_channel(SYSCTL_HANDLER_ARGS)
2690{
2691	static const struct {
2692		enum witness_channel channel;
2693		const char *name;
2694	} channels[] = {
2695		{ WITNESS_CONSOLE, "console" },
2696		{ WITNESS_LOG, "log" },
2697		{ WITNESS_NONE, "none" },
2698	};
2699	char buf[16];
2700	u_int i;
2701	int error;
2702
2703	buf[0] = '\0';
2704	for (i = 0; i < nitems(channels); i++)
2705		if (witness_channel == channels[i].channel) {
2706			snprintf(buf, sizeof(buf), "%s", channels[i].name);
2707			break;
2708		}
2709
2710	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2711	if (error != 0 || req->newptr == NULL)
2712		return (error);
2713
2714	error = EINVAL;
2715	for (i = 0; i < nitems(channels); i++)
2716		if (strcmp(channels[i].name, buf) == 0) {
2717			witness_channel = channels[i].channel;
2718			error = 0;
2719			break;
2720		}
2721	return (error);
2722}
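/*
 * This handler maps a string ("console", "log" or "none") to and from the
 * witness_channel enum; the corresponding sysctl (registered elsewhere,
 * presumably debug.witness.output_channel) selects where witness_output()
 * sends its messages.
 */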
2723
2724static int
2725sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2726{
2727	struct witness *w;
2728	struct sbuf *sb;
2729	int error;
2730
2731	if (witness_watch < 1) {
2732		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2733		return (error);
2734	}
2735	if (witness_cold) {
2736		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2737		return (error);
2738	}
2739	error = 0;
2740
2741	error = sysctl_wire_old_buffer(req, 0);
2742	if (error != 0)
2743		return (error);
2744	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2745	if (sb == NULL)
2746		return (ENOMEM);
2747	sbuf_printf(sb, "\n");
2748
2749	mtx_lock_spin(&w_mtx);
2750	STAILQ_FOREACH(w, &w_all, w_list)
2751		w->w_displayed = 0;
2752	STAILQ_FOREACH(w, &w_all, w_list)
2753		witness_add_fullgraph(sb, w);
2754	mtx_unlock_spin(&w_mtx);
2755
2756	/*
2757	 * Close the sbuf and return to userland.
2758	 */
2759	error = sbuf_finish(sb);
2760	sbuf_delete(sb);
2761
2762	return (error);
2763}
2764
2765static int
2766sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2767{
2768	int error, value;
2769
2770	value = witness_watch;
2771	error = sysctl_handle_int(oidp, &value, 0, req);
2772	if (error != 0 || req->newptr == NULL)
2773		return (error);
2774	if (value > 1 || value < -1 ||
2775	    (witness_watch == -1 && value != witness_watch))
2776		return (EINVAL);
2777	witness_watch = value;
2778	return (0);
2779}
2780
2781static void
2782witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2783{
2784	int i;
2785
2786	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2787		return;
2788	w->w_displayed = 1;
2789
2790	WITNESS_INDEX_ASSERT(w->w_index);
2791	for (i = 1; i <= w_max_used_index; i++) {
2792		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2793			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2794			    w_data[i].w_name);
2795			witness_add_fullgraph(sb, &w_data[i]);
2796		}
2797	}
2798}
2799
2800/*
2801 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2802 * interprets the key as a string and reads until the null
2803 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2804 * hash value computed from the key.
2805 */
2806static uint32_t
2807witness_hash_djb2(const uint8_t *key, uint32_t size)
2808{
2809	unsigned int hash = 5381;
2810	int i;
2811
2812	/* hash = hash * 33 + key[i] */
2813	if (size)
2814		for (i = 0; i < size; i++)
2815			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2816	else
2817		for (i = 0; key[i] != 0; i++)
2818			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2819
2820	return (hash);
2821}
2822
2823
2824/*
2825 * Initializes the two witness hash tables. Called exactly once from
2826 * witness_initialize().
2827 */
2828static void
2829witness_init_hash_tables(void)
2830{
2831	int i;
2832
2833	MPASS(witness_cold);
2834
2835	/* Initialize the hash tables. */
2836	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2837		w_hash.wh_array[i] = NULL;
2838
2839	w_hash.wh_size = WITNESS_HASH_SIZE;
2840	w_hash.wh_count = 0;
2841
2842	/* Initialize the lock order data hash. */
2843	w_lofree = NULL;
2844	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2845		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2846		w_lodata[i].wlod_next = w_lofree;
2847		w_lofree = &w_lodata[i];
2848	}
2849	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2850	w_lohash.wloh_count = 0;
2851	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2852		w_lohash.wloh_array[i] = NULL;
2853}
2854
2855static struct witness *
2856witness_hash_get(const char *key)
2857{
2858	struct witness *w;
2859	uint32_t hash;
2860
2861	MPASS(key != NULL);
2862	if (witness_cold == 0)
2863		mtx_assert(&w_mtx, MA_OWNED);
2864	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2865	w = w_hash.wh_array[hash];
2866	while (w != NULL) {
2867		if (strcmp(w->w_name, key) == 0)
2868			goto out;
2869		w = w->w_hash_next;
2870	}
2871
2872out:
2873	return (w);
2874}
2875
2876static void
2877witness_hash_put(struct witness *w)
2878{
2879	uint32_t hash;
2880
2881	MPASS(w != NULL);
2882	MPASS(w->w_name != NULL);
2883	if (witness_cold == 0)
2884		mtx_assert(&w_mtx, MA_OWNED);
2885	KASSERT(witness_hash_get(w->w_name) == NULL,
2886	    ("%s: trying to add a hash entry that already exists!", __func__));
2887	KASSERT(w->w_hash_next == NULL,
2888	    ("%s: w->w_hash_next != NULL", __func__));
2889
2890	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2891	w->w_hash_next = w_hash.wh_array[hash];
2892	w_hash.wh_array[hash] = w;
2893	w_hash.wh_count++;
2894}
2895
2896
2897static struct witness_lock_order_data *
2898witness_lock_order_get(struct witness *parent, struct witness *child)
2899{
2900	struct witness_lock_order_data *data = NULL;
2901	struct witness_lock_order_key key;
2902	unsigned int hash;
2903
2904	MPASS(parent != NULL && child != NULL);
2905	key.from = parent->w_index;
2906	key.to = child->w_index;
2907	WITNESS_INDEX_ASSERT(key.from);
2908	WITNESS_INDEX_ASSERT(key.to);
2909	if ((w_rmatrix[parent->w_index][child->w_index]
2910	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2911		goto out;
2912
2913	hash = witness_hash_djb2((const char*)&key,
2914	    sizeof(key)) % w_lohash.wloh_size;
2915	data = w_lohash.wloh_array[hash];
2916	while (data != NULL) {
2917		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2918			break;
2919		data = data->wlod_next;
2920	}
2921
2922out:
2923	return (data);
2924}
2925
2926/*
2927 * Verify that parent and child have a known relationship, are not the same,
2928 * and child is actually a child of parent.  This is done without w_mtx
2929 * to avoid contention in the common case.
2930 */
2931static int
2932witness_lock_order_check(struct witness *parent, struct witness *child)
2933{
2934
2935	if (parent != child &&
2936	    w_rmatrix[parent->w_index][child->w_index]
2937	    & WITNESS_LOCK_ORDER_KNOWN &&
2938	    isitmychild(parent, child))
2939		return (1);
2940
2941	return (0);
2942}
2943
2944static int
2945witness_lock_order_add(struct witness *parent, struct witness *child)
2946{
2947	struct witness_lock_order_data *data = NULL;
2948	struct witness_lock_order_key key;
2949	unsigned int hash;
2950
2951	MPASS(parent != NULL && child != NULL);
2952	key.from = parent->w_index;
2953	key.to = child->w_index;
2954	WITNESS_INDEX_ASSERT(key.from);
2955	WITNESS_INDEX_ASSERT(key.to);
2956	if (w_rmatrix[parent->w_index][child->w_index]
2957	    & WITNESS_LOCK_ORDER_KNOWN)
2958		return (1);
2959
2960	hash = witness_hash_djb2((const char*)&key,
2961	    sizeof(key)) % w_lohash.wloh_size;
2962	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2963	data = w_lofree;
2964	if (data == NULL)
2965		return (0);
2966	w_lofree = data->wlod_next;
2967	data->wlod_next = w_lohash.wloh_array[hash];
2968	data->wlod_key = key;
2969	w_lohash.wloh_array[hash] = data;
2970	w_lohash.wloh_count++;
2971	stack_zero(&data->wlod_stack);
2972	stack_save(&data->wlod_stack);
2973	return (1);
2974}
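/*
 * witness_lock_order_add() records the first time a particular (parent,
 * child) ordering is seen: the pair of witness indices forms the hash key
 * and the current stack is saved, which is where the "first seen at"
 * stacks in the badstacks sysctl output above come from.
 */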
2975
2976/* Call this whenever the structure of the witness graph changes. */
2977static void
2978witness_increment_graph_generation(void)
2979{
2980
2981	if (witness_cold == 0)
2982		mtx_assert(&w_mtx, MA_OWNED);
2983	w_generation++;
2984}
2985
2986static int
2987witness_output_drain(void *arg __unused, const char *data, int len)
2988{
2989
2990	witness_output("%.*s", len, data);
2991	return (len);
2992}
2993
2994static void
2995witness_debugger(int cond, const char *msg)
2996{
2997	char buf[32];
2998	struct sbuf sb;
2999	struct stack st;
3000
3001	if (!cond)
3002		return;
3003
3004	if (witness_trace) {
3005		sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
3006		sbuf_set_drain(&sb, witness_output_drain, NULL);
3007
3008		stack_zero(&st);
3009		stack_save(&st);
3010		witness_output("stack backtrace:\n");
3011		stack_sbuf_print_ddb(&sb, &st);
3012
3013		sbuf_finish(&sb);
3014	}
3015
3016#ifdef KDB
3017	if (witness_kdb)
3018		kdb_enter(KDB_WHY_WITNESS, msg);
3019#endif
3020}
3021