subr_witness.c revision 207922
1/*-
2 * Copyright (c) 2008 Isilon Systems, Inc.
3 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4 * Copyright (c) 1998 Berkeley Software Design, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Berkeley Software Design Inc's name may not be used to endorse or
16 *    promote products derived from this software without specific prior
17 *    written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33 */
34
35/*
36 * Implementation of the `witness' lock verifier.  Originally implemented for
37 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
38 * classes in FreeBSD.
39 */
40
41/*
42 *	Main Entry: witness
43 *	Pronunciation: 'wit-n&s
44 *	Function: noun
45 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
46 *	    testimony, witness, from 2wit
47 *	Date: before 12th century
48 *	1 : attestation of a fact or event : TESTIMONY
49 *	2 : one that gives evidence; specifically : one who testifies in
50 *	    a cause or before a judicial tribunal
51 *	3 : one asked to be present at a transaction so as to be able to
52 *	    testify to its having taken place
53 *	4 : one who has personal knowledge of something
54 *	5 a : something serving as evidence or proof : SIGN
55 *	  b : public affirmation by word or example of usually
56 *	      religious faith or conviction <the heroic witness to divine
57 *	      life -- Pilot>
58 *	6 capitalized : a member of the Jehovah's Witnesses
59 */
60
61/*
62 * Special rules concerning Giant and lock orders:
63 *
64 * 1) Giant must be acquired before any other mutexes.  Stated another way,
65 *    no other mutex may be held when Giant is acquired.
66 *
67 * 2) Giant must be released when blocking on a sleepable lock.
68 *
69 * This rule is less obvious, but is a result of Giant providing the same
70 * semantics as spl().  Basically, when a thread sleeps, it must release
71 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
72 * 2).
73 *
74 * 3) Giant may be acquired before or after sleepable locks.
75 *
76 * This rule is also not quite as obvious.  Giant may be acquired after
77 * a sleepable lock because it is a non-sleepable lock and non-sleepable
78 * locks may always be acquired while holding a sleepable lock.  The second
79 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
80 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
81 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
82 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
83 * execute.  Thus, acquiring Giant both before and after a sleepable lock
84 * will not result in a lock order reversal.
85 */
86
87#include <sys/cdefs.h>
88__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 207922 2010-05-11 17:01:14Z attilio $");
89
90#include "opt_ddb.h"
91#include "opt_hwpmc_hooks.h"
92#include "opt_stack.h"
93#include "opt_witness.h"
94
95#include <sys/param.h>
96#include <sys/bus.h>
97#include <sys/kdb.h>
98#include <sys/kernel.h>
99#include <sys/ktr.h>
100#include <sys/lock.h>
101#include <sys/malloc.h>
102#include <sys/mutex.h>
103#include <sys/priv.h>
104#include <sys/proc.h>
105#include <sys/sbuf.h>
106#include <sys/sched.h>
107#include <sys/stack.h>
108#include <sys/sysctl.h>
109#include <sys/systm.h>
110
111#ifdef DDB
112#include <ddb/ddb.h>
113#endif
114
115#include <machine/stdarg.h>
116
117#if !defined(DDB) && !defined(STACK)
118#error "DDB or STACK options are required for WITNESS"
119#endif
120
121/* Note that these traces do not work with KTR_ALQ. */
122#if 0
123#define	KTR_WITNESS	KTR_SUBSYS
124#else
125#define	KTR_WITNESS	0
126#endif
127
128#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
129#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
130#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */
131
132/* Define this to check for blessed mutexes */
133#undef BLESSING
134
135#define	WITNESS_COUNT 		1024
136#define	WITNESS_CHILDCOUNT 	(WITNESS_COUNT * 4)
137#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
138#define	WITNESS_PENDLIST	512
139
140/* Allocate 256 KB of stack data space */
141#define	WITNESS_LO_DATA_COUNT	2048
142
143/* Prime, gives load factor of ~2 at full load */
144#define	WITNESS_LO_HASH_SIZE	1021
145
146/*
147 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
148 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
149 * probably be safe for the most part, but it's still a SWAG.
150 */
151#define	LOCK_NCHILDREN	5
152#define	LOCK_CHILDCOUNT	2048
153
154#define	MAX_W_NAME	64
155
156#define	BADSTACK_SBUF_SIZE	(256 * WITNESS_COUNT)
157#define	CYCLEGRAPH_SBUF_SIZE	8192
158#define	FULLGRAPH_SBUF_SIZE	32768
159
160/*
161 * These flags go in the witness relationship matrix and describe the
162 * relationship between any two struct witness objects.
163 */
164#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
165#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
166#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
167#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
168#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
169#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
170#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
171#define	WITNESS_RELATED_MASK						\
172	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
173#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
174					  * observed. */
175#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
176#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
177#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */
178
179/* Descendant to ancestor flags */
180#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)
181
182/* Ancestor to descendant flags */
183#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
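
/*
 * For example, given the flag values above, converting between the two
 * views of a relationship works out as:
 *
 *	WITNESS_ATOD(WITNESS_PARENT)     == WITNESS_CHILD
 *	WITNESS_ATOD(WITNESS_ANCESTOR)   == WITNESS_DESCENDANT
 *	WITNESS_DTOA(WITNESS_CHILD)      == WITNESS_PARENT
 *	WITNESS_DTOA(WITNESS_DESCENDANT) == WITNESS_ANCESTOR
 *
 * These identities follow directly from the bit layout and are only meant
 * as an illustration of how the matrix flags read in either direction.
 */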
184
185#define	WITNESS_INDEX_ASSERT(i)						\
186	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
187
188MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
189
190/*
191 * Lock instances.  A lock instance is the data associated with a lock while
192 * it is held by witness.  For example, a lock instance will hold the
193 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
194 * are held in a per-cpu list while sleep locks are held in a per-thread list.
195 */
196struct lock_instance {
197	struct lock_object	*li_lock;
198	const char		*li_file;
199	int			li_line;
200	u_int			li_flags;
201};
202
203/*
204 * A simple list type used to build the list of locks held by a thread
205 * or CPU.  We can't simply embed the list in struct lock_object since a
206 * lock may be held by more than one thread if it is a shared lock.  Locks
207 * are added to the head of the list, so we fill up each list entry from
208 * "the back" logically.  To ease some of the arithmetic, we actually fill
209 * in each list entry the normal way (children[0] then children[1], etc.) but
210 * when we traverse the list we read children[count-1] as the first entry
211 * down to children[0] as the final entry.
212 */
213struct lock_list_entry {
214	struct lock_list_entry	*ll_next;
215	struct lock_instance	ll_children[LOCK_NCHILDREN];
216	u_int			ll_count;
217};
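
/*
 * A minimal sketch of the traversal idiom that results from the layout
 * described above (the same pattern appears in witness_warn() and
 * witness_thread_exit() below); it visits the most recently acquired
 * instance first:
 *
 *	for (lle = list; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			examine(&lle->ll_children[i]);
 *
 * Here examine() is just a placeholder for whatever per-instance work
 * the caller performs.
 */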
218
219/*
220 * The main witness structure. One of these per named lock type in the system
221 * (for example, "vnode interlock").
222 */
223struct witness {
224	char  			w_name[MAX_W_NAME];
225	uint32_t 		w_index;  /* Index in the relationship matrix */
226	struct lock_class	*w_class;
227	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
228	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
229	struct witness		*w_hash_next; /* Linked list in hash buckets. */
230	const char		*w_file; /* File where last acquired */
231	uint32_t 		w_line; /* Line where last acquired */
232	uint32_t 		w_refcount;
233	uint16_t 		w_num_ancestors; /* direct/indirect
234						  * ancestor count */
235	uint16_t 		w_num_descendants; /* direct/indirect
236						    * descendant count */
237	int16_t 		w_ddb_level;
238	unsigned		w_displayed:1;
239	unsigned		w_reversed:1;
240};
241
242STAILQ_HEAD(witness_list, witness);
243
244/*
245 * The witness hash table. Keys are witness names (const char *), elements are
246 * witness objects (struct witness *).
247 */
248struct witness_hash {
249	struct witness	*wh_array[WITNESS_HASH_SIZE];
250	uint32_t	wh_size;
251	uint32_t	wh_count;
252};
253
254/*
255 * Key type for the lock order data hash table.
256 */
257struct witness_lock_order_key {
258	uint16_t	from;
259	uint16_t	to;
260};
261
262struct witness_lock_order_data {
263	struct stack			wlod_stack;
264	struct witness_lock_order_key	wlod_key;
265	struct witness_lock_order_data	*wlod_next;
266};
267
268/*
269 * The witness lock order data hash table. Keys are witness index tuples
270 * (struct witness_lock_order_key), elements are lock order data objects
271 * (struct witness_lock_order_data).
272 */
273struct witness_lock_order_hash {
274	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
275	u_int	wloh_size;
276	u_int	wloh_count;
277};
278
279#ifdef BLESSING
280struct witness_blessed {
281	const char	*b_lock1;
282	const char	*b_lock2;
283};
284#endif
285
286struct witness_pendhelp {
287	const char		*wh_type;
288	struct lock_object	*wh_lock;
289};
290
291struct witness_order_list_entry {
292	const char		*w_name;
293	struct lock_class	*w_class;
294};
295
296/*
297 * Returns 0 if one of the locks is a spin lock and the other is not.
298 * Returns 1 otherwise.
299 */
300static __inline int
301witness_lock_type_equal(struct witness *w1, struct witness *w2)
302{
303
304	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
305		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
306}
307
308static __inline int
309witness_lock_order_key_empty(const struct witness_lock_order_key *key)
310{
311
312	return (key->from == 0 && key->to == 0);
313}
314
315static __inline int
316witness_lock_order_key_equal(const struct witness_lock_order_key *a,
317    const struct witness_lock_order_key *b)
318{
319
320	return (a->from == b->from && a->to == b->to);
321}
322
323static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
324		    const char *fname);
325#ifdef KDB
326static void	_witness_debugger(int cond, const char *msg);
327#endif
328static void	adopt(struct witness *parent, struct witness *child);
329#ifdef BLESSING
330static int	blessed(struct witness *, struct witness *);
331#endif
332static void	depart(struct witness *w);
333static struct witness	*enroll(const char *description,
334			    struct lock_class *lock_class);
335static struct lock_instance	*find_instance(struct lock_list_entry *list,
336				    struct lock_object *lock);
337static int	isitmychild(struct witness *parent, struct witness *child);
338static int	isitmydescendant(struct witness *parent, struct witness *child);
339static void	itismychild(struct witness *parent, struct witness *child);
340static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
341static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
342static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
343static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
344#ifdef DDB
345static void	witness_ddb_compute_levels(void);
346static void	witness_ddb_display(int(*)(const char *fmt, ...));
347static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
348		    struct witness *, int indent);
349static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
350		    struct witness_list *list);
351static void	witness_ddb_level_descendants(struct witness *parent, int l);
352static void	witness_ddb_list(struct thread *td);
353#endif
354static void	witness_free(struct witness *m);
355static struct witness	*witness_get(void);
356static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
357static struct witness	*witness_hash_get(const char *key);
358static void	witness_hash_put(struct witness *w);
359static void	witness_init_hash_tables(void);
360static void	witness_increment_graph_generation(void);
361static void	witness_lock_list_free(struct lock_list_entry *lle);
362static struct lock_list_entry	*witness_lock_list_get(void);
363static int	witness_lock_order_add(struct witness *parent,
364		    struct witness *child);
365static int	witness_lock_order_check(struct witness *parent,
366		    struct witness *child);
367static struct witness_lock_order_data	*witness_lock_order_get(
368					    struct witness *parent,
369					    struct witness *child);
370static void	witness_list_lock(struct lock_instance *instance);
371static void	witness_setflag(struct lock_object *lock, int flag, int set);
372
373#ifdef KDB
374#define	witness_debugger(c)	_witness_debugger(c, __func__)
375#else
376#define	witness_debugger(c)
377#endif
378
379SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking");
380
381/*
382 * If set to 0, lock order checking is disabled.  If set to -1,
383 * witness is completely disabled.  Otherwise witness performs full
384 * lock order checking for all locks.  At runtime, lock order checking
385 * may be toggled.  However, witness cannot be reenabled once it is
386 * completely disabled.
387 */
388static int witness_watch = 1;
389TUNABLE_INT("debug.witness.watch", &witness_watch);
390SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
391    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
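
/*
 * As a usage note: since the knob above is registered both as a tunable
 * and as a read-write sysctl, it can typically be set at boot time from
 * loader.conf(5) (e.g. "debug.witness.watch=0") or adjusted at runtime
 * with sysctl(8), subject to the rule above that a completely disabled
 * witness cannot be re-enabled.
 */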
392
393#ifdef KDB
394/*
395 * When KDB is enabled and witness_kdb is 1, it will cause the system
396 * to drop into kdebug() when:
397 *	- a lock hierarchy violation occurs
398 *	- locks are held when going to sleep.
399 */
400#ifdef WITNESS_KDB
401int	witness_kdb = 1;
402#else
403int	witness_kdb = 0;
404#endif
405TUNABLE_INT("debug.witness.kdb", &witness_kdb);
406SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
407
408/*
409 * When KDB is enabled and witness_trace is 1, it will cause the system
410 * to print a stack trace when:
411 *	- a lock hierarchy violation occurs
412 *	- locks are held when going to sleep.
413 */
414int	witness_trace = 1;
415TUNABLE_INT("debug.witness.trace", &witness_trace);
416SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
417#endif /* KDB */
418
419#ifdef WITNESS_SKIPSPIN
420int	witness_skipspin = 1;
421#else
422int	witness_skipspin = 0;
423#endif
424TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
425SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
426    0, "");
427
428/*
429 * Call this to print out the relations between locks.
430 */
431SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
432    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
433
434/*
435 * Call this to print out the stacks recorded for witness lock order reversals.
436 */
437SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
438    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
439
440static struct mtx w_mtx;
441
442/* w_list */
443static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
444static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
445
446/* w_typelist */
447static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
448static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
449
450/* lock list */
451static struct lock_list_entry *w_lock_list_free = NULL;
452static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
453static u_int pending_cnt;
454
455static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
456SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
457SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
458SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
459    "");
460
461static struct witness *w_data;
462static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
463static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
464static struct witness_hash w_hash;	/* The witness hash table. */
465
466/* The lock order data hash */
467static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
468static struct witness_lock_order_data *w_lofree = NULL;
469static struct witness_lock_order_hash w_lohash;
470static int w_max_used_index = 0;
471static unsigned int w_generation = 0;
472static const char w_notrunning[] = "Witness not running\n";
473static const char w_stillcold[] = "Witness is still cold\n";
474
475
476static struct witness_order_list_entry order_lists[] = {
477	/*
478	 * sx locks
479	 */
480	{ "proctree", &lock_class_sx },
481	{ "allproc", &lock_class_sx },
482	{ "allprison", &lock_class_sx },
483	{ NULL, NULL },
484	/*
485	 * Various mutexes
486	 */
487	{ "Giant", &lock_class_mtx_sleep },
488	{ "pipe mutex", &lock_class_mtx_sleep },
489	{ "sigio lock", &lock_class_mtx_sleep },
490	{ "process group", &lock_class_mtx_sleep },
491	{ "process lock", &lock_class_mtx_sleep },
492	{ "session", &lock_class_mtx_sleep },
493	{ "uidinfo hash", &lock_class_rw },
494#ifdef	HWPMC_HOOKS
495	{ "pmc-sleep", &lock_class_mtx_sleep },
496#endif
497	{ NULL, NULL },
498	/*
499	 * Sockets
500	 */
501	{ "accept", &lock_class_mtx_sleep },
502	{ "so_snd", &lock_class_mtx_sleep },
503	{ "so_rcv", &lock_class_mtx_sleep },
504	{ "sellck", &lock_class_mtx_sleep },
505	{ NULL, NULL },
506	/*
507	 * Routing
508	 */
509	{ "so_rcv", &lock_class_mtx_sleep },
510	{ "radix node head", &lock_class_rw },
511	{ "rtentry", &lock_class_mtx_sleep },
512	{ "ifaddr", &lock_class_mtx_sleep },
513	{ NULL, NULL },
514	/*
515	 * IPv4 multicast:
516	 * protocol locks before interface locks, after UDP locks.
517	 */
518	{ "udpinp", &lock_class_rw },
519	{ "in_multi_mtx", &lock_class_mtx_sleep },
520	{ "igmp_mtx", &lock_class_mtx_sleep },
521	{ "if_addr_mtx", &lock_class_mtx_sleep },
522	{ NULL, NULL },
523	/*
524	 * IPv6 multicast:
525	 * protocol locks before interface locks, after UDP locks.
526	 */
527	{ "udpinp", &lock_class_rw },
528	{ "in6_multi_mtx", &lock_class_mtx_sleep },
529	{ "mld_mtx", &lock_class_mtx_sleep },
530	{ "if_addr_mtx", &lock_class_mtx_sleep },
531	{ NULL, NULL },
532	/*
533	 * UNIX Domain Sockets
534	 */
535	{ "unp_global_rwlock", &lock_class_rw },
536	{ "unp_list_lock", &lock_class_mtx_sleep },
537	{ "unp", &lock_class_mtx_sleep },
538	{ "so_snd", &lock_class_mtx_sleep },
539	{ NULL, NULL },
540	/*
541	 * UDP/IP
542	 */
543	{ "udp", &lock_class_rw },
544	{ "udpinp", &lock_class_rw },
545	{ "so_snd", &lock_class_mtx_sleep },
546	{ NULL, NULL },
547	/*
548	 * TCP/IP
549	 */
550	{ "tcp", &lock_class_rw },
551	{ "tcpinp", &lock_class_rw },
552	{ "so_snd", &lock_class_mtx_sleep },
553	{ NULL, NULL },
554	/*
555	 * netatalk
556	 */
557	{ "ddp_list_mtx", &lock_class_mtx_sleep },
558	{ "ddp_mtx", &lock_class_mtx_sleep },
559	{ NULL, NULL },
560	/*
561	 * BPF
562	 */
563	{ "bpf global lock", &lock_class_mtx_sleep },
564	{ "bpf interface lock", &lock_class_mtx_sleep },
565	{ "bpf cdev lock", &lock_class_mtx_sleep },
566	{ NULL, NULL },
567	/*
568	 * NFS server
569	 */
570	{ "nfsd_mtx", &lock_class_mtx_sleep },
571	{ "so_snd", &lock_class_mtx_sleep },
572	{ NULL, NULL },
573
574	/*
575	 * IEEE 802.11
576	 */
577	{ "802.11 com lock", &lock_class_mtx_sleep},
578	{ NULL, NULL },
579	/*
580	 * Network drivers
581	 */
582	{ "network driver", &lock_class_mtx_sleep},
583	{ NULL, NULL },
584
585	/*
586	 * Netgraph
587	 */
588	{ "ng_node", &lock_class_mtx_sleep },
589	{ "ng_worklist", &lock_class_mtx_sleep },
590	{ NULL, NULL },
591	/*
592	 * CDEV
593	 */
594	{ "system map", &lock_class_mtx_sleep },
595	{ "vm page queue mutex", &lock_class_mtx_sleep },
596	{ "vnode interlock", &lock_class_mtx_sleep },
597	{ "cdev", &lock_class_mtx_sleep },
598	{ NULL, NULL },
599	/*
600	 * VM
601	 *
602	 */
603	{ "vm object", &lock_class_mtx_sleep },
604	{ "page lock", &lock_class_mtx_sleep },
605	{ "vm page queue mutex", &lock_class_mtx_sleep },
606	{ "pmap", &lock_class_mtx_sleep },
607	{ NULL, NULL },
608	/*
609	 * kqueue/VFS interaction
610	 */
611	{ "kqueue", &lock_class_mtx_sleep },
612	{ "struct mount mtx", &lock_class_mtx_sleep },
613	{ "vnode interlock", &lock_class_mtx_sleep },
614	{ NULL, NULL },
615	/*
616	 * ZFS locking
617	 */
618	{ "dn->dn_mtx", &lock_class_sx },
619	{ "dr->dt.di.dr_mtx", &lock_class_sx },
620	{ "db->db_mtx", &lock_class_sx },
621	{ NULL, NULL },
622	/*
623	 * spin locks
624	 */
625#ifdef SMP
626	{ "ap boot", &lock_class_mtx_spin },
627#endif
628	{ "rm.mutex_mtx", &lock_class_mtx_spin },
629	{ "sio", &lock_class_mtx_spin },
630	{ "scrlock", &lock_class_mtx_spin },
631#ifdef __i386__
632	{ "cy", &lock_class_mtx_spin },
633#endif
634#ifdef __sparc64__
635	{ "pcib_mtx", &lock_class_mtx_spin },
636	{ "rtc_mtx", &lock_class_mtx_spin },
637#endif
638	{ "scc_hwmtx", &lock_class_mtx_spin },
639	{ "uart_hwmtx", &lock_class_mtx_spin },
640	{ "fast_taskqueue", &lock_class_mtx_spin },
641	{ "intr table", &lock_class_mtx_spin },
642#ifdef	HWPMC_HOOKS
643	{ "pmc-per-proc", &lock_class_mtx_spin },
644#endif
645	{ "process slock", &lock_class_mtx_spin },
646	{ "sleepq chain", &lock_class_mtx_spin },
647	{ "umtx lock", &lock_class_mtx_spin },
648	{ "rm_spinlock", &lock_class_mtx_spin },
649	{ "turnstile chain", &lock_class_mtx_spin },
650	{ "turnstile lock", &lock_class_mtx_spin },
651	{ "sched lock", &lock_class_mtx_spin },
652	{ "td_contested", &lock_class_mtx_spin },
653	{ "callout", &lock_class_mtx_spin },
654	{ "entropy harvest mutex", &lock_class_mtx_spin },
655	{ "syscons video lock", &lock_class_mtx_spin },
656	{ "time lock", &lock_class_mtx_spin },
657#ifdef SMP
658	{ "smp rendezvous", &lock_class_mtx_spin },
659#endif
660#ifdef __powerpc__
661	{ "tlb0", &lock_class_mtx_spin },
662#endif
663	/*
664	 * leaf locks
665	 */
666	{ "intrcnt", &lock_class_mtx_spin },
667	{ "icu", &lock_class_mtx_spin },
668#if defined(SMP) && defined(__sparc64__)
669	{ "ipi", &lock_class_mtx_spin },
670#endif
671#ifdef __i386__
672	{ "allpmaps", &lock_class_mtx_spin },
673	{ "descriptor tables", &lock_class_mtx_spin },
674#endif
675	{ "clk", &lock_class_mtx_spin },
676	{ "cpuset", &lock_class_mtx_spin },
677	{ "mprof lock", &lock_class_mtx_spin },
678	{ "zombie lock", &lock_class_mtx_spin },
679	{ "ALD Queue", &lock_class_mtx_spin },
680#ifdef __ia64__
681	{ "MCA spin lock", &lock_class_mtx_spin },
682#endif
683#if defined(__i386__) || defined(__amd64__)
684	{ "pcicfg", &lock_class_mtx_spin },
685	{ "NDIS thread lock", &lock_class_mtx_spin },
686#endif
687	{ "tw_osl_io_lock", &lock_class_mtx_spin },
688	{ "tw_osl_q_lock", &lock_class_mtx_spin },
689	{ "tw_cl_io_lock", &lock_class_mtx_spin },
690	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
691	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
692#ifdef	HWPMC_HOOKS
693	{ "pmc-leaf", &lock_class_mtx_spin },
694#endif
695	{ "blocked lock", &lock_class_mtx_spin },
696	{ NULL, NULL },
697	{ NULL, NULL }
698};
699
700#ifdef BLESSING
701/*
702 * Pairs of locks which have been blessed.
703 * Don't complain about order problems with blessed locks.
704 */
705static struct witness_blessed blessed_list[] = {
706};
707static int blessed_count =
708	sizeof(blessed_list) / sizeof(struct witness_blessed);
709#endif
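
/*
 * A hypothetical example of a blessed entry, purely illustrative (the
 * lock names below are assumptions, not a pair that has actually been
 * blessed):
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "vnode interlock", "process lock" },
 *	};
 *
 * With such an entry, blessed() treats the pair as blessed and witness
 * does not report a reversal between those two names.
 */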
710
711/*
712 * This global is set to 0 once it becomes safe to use the witness code.
713 */
714static int witness_cold = 1;
715
716/*
717 * This global is set to 1 once the static lock orders have been enrolled
718 * so that a warning can be issued for any spin locks enrolled later.
719 */
720static int witness_spin_warn = 0;
721
722/*
723 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
724 * that early boot is single-threaded, at least until after this routine
725 * has completed.
726 */
727static void
728witness_initialize(void *dummy __unused)
729{
730	struct lock_object *lock;
731	struct witness_order_list_entry *order;
732	struct witness *w, *w1;
733	int i;
734
735	w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
736	    M_NOWAIT | M_ZERO);
737
738	/*
739	 * We have to release Giant before initializing its witness
740	 * structure so that WITNESS doesn't get confused.
741	 */
742	mtx_unlock(&Giant);
743	mtx_assert(&Giant, MA_NOTOWNED);
744
745	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
746	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
747	    MTX_NOWITNESS | MTX_NOPROFILE);
748	for (i = WITNESS_COUNT - 1; i >= 0; i--) {
749		w = &w_data[i];
750		memset(w, 0, sizeof(*w));
751		w_data[i].w_index = i;	/* Witness index never changes. */
752		witness_free(w);
753	}
754	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
755	    ("%s: Invalid list of free witness objects", __func__));
756
757	/* The witness with index 0 is left unused as an aid to debugging. */
758	STAILQ_REMOVE_HEAD(&w_free, w_list);
759	w_free_cnt--;
760
761	memset(w_rmatrix, 0,
762	    (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));
763
764	for (i = 0; i < LOCK_CHILDCOUNT; i++)
765		witness_lock_list_free(&w_locklistdata[i]);
766	witness_init_hash_tables();
767
768	/* First add in all the specified order lists. */
769	for (order = order_lists; order->w_name != NULL; order++) {
770		w = enroll(order->w_name, order->w_class);
771		if (w == NULL)
772			continue;
773		w->w_file = "order list";
774		for (order++; order->w_name != NULL; order++) {
775			w1 = enroll(order->w_name, order->w_class);
776			if (w1 == NULL)
777				continue;
778			w1->w_file = "order list";
779			itismychild(w, w1);
780			w = w1;
781		}
782	}
783	witness_spin_warn = 1;
784
785	/* Iterate through all locks and add them to witness. */
786	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
787		lock = pending_locks[i].wh_lock;
788		KASSERT(lock->lo_flags & LO_WITNESS,
789		    ("%s: lock %s is on pending list but not LO_WITNESS",
790		    __func__, lock->lo_name));
791		lock->lo_witness = enroll(pending_locks[i].wh_type,
792		    LOCK_CLASS(lock));
793	}
794
795	/* Mark the witness code as being ready for use. */
796	witness_cold = 0;
797
798	mtx_lock(&Giant);
799}
800SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
801    NULL);
802
803void
804witness_init(struct lock_object *lock, const char *type)
805{
806	struct lock_class *class;
807
808	/* Various sanity checks. */
809	class = LOCK_CLASS(lock);
810	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
811	    (class->lc_flags & LC_RECURSABLE) == 0)
812		panic("%s: lock (%s) %s can not be recursable", __func__,
813		    class->lc_name, lock->lo_name);
814	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
815	    (class->lc_flags & LC_SLEEPABLE) == 0)
816		panic("%s: lock (%s) %s can not be sleepable", __func__,
817		    class->lc_name, lock->lo_name);
818	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
819	    (class->lc_flags & LC_UPGRADABLE) == 0)
820		panic("%s: lock (%s) %s can not be upgradable", __func__,
821		    class->lc_name, lock->lo_name);
822
823	/*
824	 * If we shouldn't watch this lock, then just clear lo_witness.
825	 * Otherwise, if witness_cold is set, then it is too early to
826	 * enroll this lock, so defer it to witness_initialize() by adding
827	 * it to the pending_locks list.  If it is not too early, then enroll
828	 * the lock now.
829	 */
830	if (witness_watch < 1 || panicstr != NULL ||
831	    (lock->lo_flags & LO_WITNESS) == 0)
832		lock->lo_witness = NULL;
833	else if (witness_cold) {
834		pending_locks[pending_cnt].wh_lock = lock;
835		pending_locks[pending_cnt++].wh_type = type;
836		if (pending_cnt > WITNESS_PENDLIST)
837			panic("%s: pending locks list is too small, bump it\n",
838			    __func__);
839	} else
840		lock->lo_witness = enroll(type, class);
841}
842
843void
844witness_destroy(struct lock_object *lock)
845{
846	struct lock_class *class;
847	struct witness *w;
848
849	class = LOCK_CLASS(lock);
850
851	if (witness_cold)
852		panic("lock (%s) %s destroyed while witness_cold",
853		    class->lc_name, lock->lo_name);
854
855	/* XXX: need to verify that no one holds the lock */
856	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
857		return;
858	w = lock->lo_witness;
859
860	mtx_lock_spin(&w_mtx);
861	MPASS(w->w_refcount > 0);
862	w->w_refcount--;
863
864	if (w->w_refcount == 0)
865		depart(w);
866	mtx_unlock_spin(&w_mtx);
867}
868
869#ifdef DDB
870static void
871witness_ddb_compute_levels(void)
872{
873	struct witness *w;
874
875	/*
876	 * First clear all levels.
877	 */
878	STAILQ_FOREACH(w, &w_all, w_list)
879		w->w_ddb_level = -1;
880
881	/*
882	 * Look for locks with no parents and level all their descendants.
883	 */
884	STAILQ_FOREACH(w, &w_all, w_list) {
885
886		/* If the witness has ancestors (is not a root), skip it. */
887		if (w->w_num_ancestors > 0)
888			continue;
889		witness_ddb_level_descendants(w, 0);
890	}
891}
892
893static void
894witness_ddb_level_descendants(struct witness *w, int l)
895{
896	int i;
897
898	if (w->w_ddb_level >= l)
899		return;
900
901	w->w_ddb_level = l;
902	l++;
903
904	for (i = 1; i <= w_max_used_index; i++) {
905		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
906			witness_ddb_level_descendants(&w_data[i], l);
907	}
908}
909
910static void
911witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
912    struct witness *w, int indent)
913{
914	int i;
915
916 	for (i = 0; i < indent; i++)
917 		prnt(" ");
918	prnt("%s (type: %s, depth: %d, active refs: %d)",
919	     w->w_name, w->w_class->lc_name,
920	     w->w_ddb_level, w->w_refcount);
921 	if (w->w_displayed) {
922 		prnt(" -- (already displayed)\n");
923 		return;
924 	}
925 	w->w_displayed = 1;
926	if (w->w_file != NULL && w->w_line != 0)
927		prnt(" -- last acquired @ %s:%d\n", w->w_file,
928		    w->w_line);
929	else
930		prnt(" -- never acquired\n");
931	indent++;
932	WITNESS_INDEX_ASSERT(w->w_index);
933	for (i = 1; i <= w_max_used_index; i++) {
934		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
935			witness_ddb_display_descendants(prnt, &w_data[i],
936			    indent);
937	}
938}
939
940static void
941witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
942    struct witness_list *list)
943{
944	struct witness *w;
945
946	STAILQ_FOREACH(w, list, w_typelist) {
947		if (w->w_file == NULL || w->w_ddb_level > 0)
948			continue;
949
950		/* This lock has no ancestors - display its descendants. */
951		witness_ddb_display_descendants(prnt, w, 0);
952	}
953}
954
955static void
956witness_ddb_display(int(*prnt)(const char *fmt, ...))
957{
958	struct witness *w;
959
960	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
961	witness_ddb_compute_levels();
962
963	/* Clear all the displayed flags. */
964	STAILQ_FOREACH(w, &w_all, w_list)
965		w->w_displayed = 0;
966
967	/*
968	 * First, handle sleep locks which have been acquired at least
969	 * once.
970	 */
971	prnt("Sleep locks:\n");
972	witness_ddb_display_list(prnt, &w_sleep);
973
974	/*
975	 * Now do spin locks which have been acquired at least once.
976	 */
977	prnt("\nSpin locks:\n");
978	witness_ddb_display_list(prnt, &w_spin);
979
980	/*
981	 * Finally, any locks which have not been acquired yet.
982	 */
983	prnt("\nLocks which were never acquired:\n");
984	STAILQ_FOREACH(w, &w_all, w_list) {
985		if (w->w_file != NULL || w->w_refcount == 0)
986			continue;
987		prnt("%s (type: %s, depth: %d)\n", w->w_name,
988		    w->w_class->lc_name, w->w_ddb_level);
989	}
990}
991#endif /* DDB */
992
993/* Trim useless garbage from filenames. */
994static const char *
995fixup_filename(const char *file)
996{
997
998	if (file == NULL)
999		return (NULL);
1000	while (strncmp(file, "../", 3) == 0)
1001		file += 3;
1002	return (file);
1003}
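
/*
 * For example, a file string such as "../../../kern/kern_mutex.c", as
 * produced by a build using relative include paths, is trimmed to
 * "kern/kern_mutex.c" by the loop above.
 */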
1004
1005int
1006witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
1007{
1008
1009	if (witness_watch == -1 || panicstr != NULL)
1010		return (0);
1011
1012	/* Require locks that witness knows about. */
1013	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1014	    lock2->lo_witness == NULL)
1015		return (EINVAL);
1016
1017	mtx_assert(&w_mtx, MA_NOTOWNED);
1018	mtx_lock_spin(&w_mtx);
1019
1020	/*
1021	 * If we already have either an explicit or implied lock order that
1022	 * is the other way around, then return an error.
1023	 */
1024	if (witness_watch &&
1025	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1026		mtx_unlock_spin(&w_mtx);
1027		return (EDOOFUS);
1028	}
1029
1030	/* Try to add the new order. */
1031	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1032	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1033	itismychild(lock1->lo_witness, lock2->lo_witness);
1034	mtx_unlock_spin(&w_mtx);
1035	return (0);
1036}
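
/*
 * To summarize the returns above for callers: 0 means the order
 * "lock1 before lock2" was recorded (or witness is disabled), EINVAL
 * means one of the locks is unknown to witness, and EDOOFUS means the
 * opposite order is already known.
 */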
1037
1038void
1039witness_checkorder(struct lock_object *lock, int flags, const char *file,
1040    int line, struct lock_object *interlock)
1041{
1042	struct lock_list_entry *lock_list, *lle;
1043	struct lock_instance *lock1, *lock2, *plock;
1044	struct lock_class *class;
1045	struct witness *w, *w1;
1046	struct thread *td;
1047	int i, j;
1048
1049	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1050	    panicstr != NULL)
1051		return;
1052
1053	w = lock->lo_witness;
1054	class = LOCK_CLASS(lock);
1055	td = curthread;
1056	file = fixup_filename(file);
1057
1058	if (class->lc_flags & LC_SLEEPLOCK) {
1059
1060		/*
1061		 * Since spin locks include a critical section, this check
1062		 * implicitly enforces a lock order of all sleep locks before
1063		 * all spin locks.
1064		 */
1065		if (td->td_critnest != 0 && !kdb_active)
1066			panic("blockable sleep lock (%s) %s @ %s:%d",
1067			    class->lc_name, lock->lo_name, file, line);
1068
1069		/*
1070		 * If this is the first lock acquired then just return as
1071		 * no order checking is needed.
1072		 */
1073		lock_list = td->td_sleeplocks;
1074		if (lock_list == NULL || lock_list->ll_count == 0)
1075			return;
1076	} else {
1077
1078		/*
1079		 * If this is the first lock, just return as no order
1080		 * checking is needed.  Pin the thread while checking
1081		 * whether any spinlocks are held, to avoid problems
1082		 * with thread migration.  If at least one spinlock is
1083		 * held, the thread cannot migrate anyway, so it is
1084		 * safe to unpin it.
1085		 */
1086		sched_pin();
1087		lock_list = PCPU_GET(spinlocks);
1088		if (lock_list == NULL || lock_list->ll_count == 0) {
1089			sched_unpin();
1090			return;
1091		}
1092		sched_unpin();
1093	}
1094
1095	/*
1096	 * Check to see if we are recursing on a lock we already own.  If
1097	 * so, make sure that we don't mismatch exclusive and shared lock
1098	 * acquires.
1099	 */
1100	lock1 = find_instance(lock_list, lock);
1101	if (lock1 != NULL) {
1102		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1103		    (flags & LOP_EXCLUSIVE) == 0) {
1104			printf("shared lock of (%s) %s @ %s:%d\n",
1105			    class->lc_name, lock->lo_name, file, line);
1106			printf("while exclusively locked from %s:%d\n",
1107			    lock1->li_file, lock1->li_line);
1108			panic("share->excl");
1109		}
1110		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1111		    (flags & LOP_EXCLUSIVE) != 0) {
1112			printf("exclusive lock of (%s) %s @ %s:%d\n",
1113			    class->lc_name, lock->lo_name, file, line);
1114			printf("while share locked from %s:%d\n",
1115			    lock1->li_file, lock1->li_line);
1116			panic("excl->share");
1117		}
1118		return;
1119	}
1120
1121	/*
1122	 * Find the previously acquired lock, but ignore interlocks.
1123	 */
1124	plock = &lock_list->ll_children[lock_list->ll_count - 1];
1125	if (interlock != NULL && plock->li_lock == interlock) {
1126		if (lock_list->ll_count > 1)
1127			plock =
1128			    &lock_list->ll_children[lock_list->ll_count - 2];
1129		else {
1130			lle = lock_list->ll_next;
1131
1132			/*
1133			 * The interlock is the only lock we hold, so
1134			 * simply return.
1135			 */
1136			if (lle == NULL)
1137				return;
1138			plock = &lle->ll_children[lle->ll_count - 1];
1139		}
1140	}
1141
1142	/*
1143	 * Try to perform most checks without a lock.  If this succeeds we
1144	 * can skip acquiring the lock and return success.
1145	 */
1146	w1 = plock->li_lock->lo_witness;
1147	if (witness_lock_order_check(w1, w))
1148		return;
1149
1150	/*
1151	 * Check for duplicate locks of the same type.  Note that we only
1152	 * have to check for this on the last lock we just acquired.  Any
1153	 * other cases will be caught as lock order violations.
1154	 */
1155	mtx_lock_spin(&w_mtx);
1156	witness_lock_order_add(w1, w);
1157	if (w1 == w) {
1158		i = w->w_index;
1159		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1160		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1161			w_rmatrix[i][i] |= WITNESS_REVERSAL;
1162			w->w_reversed = 1;
1163			mtx_unlock_spin(&w_mtx);
1164			printf(
1165			    "acquiring duplicate lock of same type: \"%s\"\n",
1166			    w->w_name);
1167			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1168			       plock->li_file, plock->li_line);
1169			printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
1170			witness_debugger(1);
1171		} else
1172			mtx_unlock_spin(&w_mtx);
1173		return;
1174	}
1175	mtx_assert(&w_mtx, MA_OWNED);
1176
1177	/*
1178	 * If we know that the lock we are acquiring comes after
1179	 * the lock we most recently acquired in the lock order tree,
1180	 * then there is no need for any further checks.
1181	 */
1182	if (isitmychild(w1, w))
1183		goto out;
1184
1185	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1186		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1187
1188			MPASS(j < WITNESS_COUNT);
1189			lock1 = &lle->ll_children[i];
1190
1191			/*
1192			 * Ignore the interlock the first time we see it.
1193			 */
1194			if (interlock != NULL && interlock == lock1->li_lock) {
1195				interlock = NULL;
1196				continue;
1197			}
1198
1199			/*
1200			 * If this lock doesn't undergo witness checking,
1201			 * then skip it.
1202			 */
1203			w1 = lock1->li_lock->lo_witness;
1204			if (w1 == NULL) {
1205				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1206				    ("lock missing witness structure"));
1207				continue;
1208			}
1209
1210			/*
1211			 * If we are locking Giant and this is a sleepable
1212			 * lock, then skip it.
1213			 */
1214			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1215			    lock == &Giant.lock_object)
1216				continue;
1217
1218			/*
1219			 * If we are locking a sleepable lock and this lock
1220			 * is Giant, then skip it.
1221			 */
1222			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1223			    lock1->li_lock == &Giant.lock_object)
1224				continue;
1225
1226			/*
1227			 * If we are locking a sleepable lock and this lock
1228			 * isn't sleepable, we want to treat it as a lock
1229			 * order violation to enforce a general lock order of
1230			 * sleepable locks before non-sleepable locks.
1231			 */
1232			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1233			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1234				goto reversal;
1235
1236			/*
1237			 * If we are locking Giant and this is a non-sleepable
1238			 * lock, then treat it as a reversal.
1239			 */
1240			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1241			    lock == &Giant.lock_object)
1242				goto reversal;
1243
1244			/*
1245			 * Check the lock order hierarchy for a reversal.
1246			 */
1247			if (!isitmydescendant(w, w1))
1248				continue;
1249		reversal:
1250
1251			/*
1252			 * We have a lock order violation, check to see if it
1253			 * is allowed or has already been yelled about.
1254			 */
1255#ifdef BLESSING
1256
1257			/*
1258			 * If the lock order is blessed, just bail.  We don't
1259			 * look for other lock order violations though, which
1260			 * may be a bug.
1261			 */
1262			if (blessed(w, w1))
1263				goto out;
1264#endif
1265
1266			/* Bail if this violation is known */
1267			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1268				goto out;
1269
1270			/* Record this as a violation */
1271			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1272			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1273			w->w_reversed = w1->w_reversed = 1;
1274			witness_increment_graph_generation();
1275			mtx_unlock_spin(&w_mtx);
1276
1277			/*
1278			 * Ok, yell about it.
1279			 */
1280			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1281			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1282				printf(
1283		"lock order reversal: (sleepable after non-sleepable)\n");
1284			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1285			    && lock == &Giant.lock_object)
1286				printf(
1287		"lock order reversal: (Giant after non-sleepable)\n");
1288			else
1289				printf("lock order reversal:\n");
1290
1291			/*
1292			 * Try to locate an earlier lock with
1293			 * witness w in our list.
1294			 */
1295			do {
1296				lock2 = &lle->ll_children[i];
1297				MPASS(lock2->li_lock != NULL);
1298				if (lock2->li_lock->lo_witness == w)
1299					break;
1300				if (i == 0 && lle->ll_next != NULL) {
1301					lle = lle->ll_next;
1302					i = lle->ll_count - 1;
1303					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1304				} else
1305					i--;
1306			} while (i >= 0);
1307			if (i < 0) {
1308				printf(" 1st %p %s (%s) @ %s:%d\n",
1309				    lock1->li_lock, lock1->li_lock->lo_name,
1310				    w1->w_name, lock1->li_file, lock1->li_line);
1311				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1312				    lock->lo_name, w->w_name, file, line);
1313			} else {
1314				printf(" 1st %p %s (%s) @ %s:%d\n",
1315				    lock2->li_lock, lock2->li_lock->lo_name,
1316				    lock2->li_lock->lo_witness->w_name,
1317				    lock2->li_file, lock2->li_line);
1318				printf(" 2nd %p %s (%s) @ %s:%d\n",
1319				    lock1->li_lock, lock1->li_lock->lo_name,
1320				    w1->w_name, lock1->li_file, lock1->li_line);
1321				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1322				    lock->lo_name, w->w_name, file, line);
1323			}
1324			witness_debugger(1);
1325			return;
1326		}
1327	}
1328
1329	/*
1330	 * If requested, build a new lock order.  However, don't build a new
1331	 * relationship between a sleepable lock and Giant if it is in the
1332	 * wrong direction.  The correct lock order is that sleepable locks
1333	 * always come before Giant.
1334	 */
1335	if (flags & LOP_NEWORDER &&
1336	    !(plock->li_lock == &Giant.lock_object &&
1337	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1338		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1339		    w->w_name, plock->li_lock->lo_witness->w_name);
1340		itismychild(plock->li_lock->lo_witness, w);
1341	}
1342out:
1343	mtx_unlock_spin(&w_mtx);
1344}
1345
1346void
1347witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1348{
1349	struct lock_list_entry **lock_list, *lle;
1350	struct lock_instance *instance;
1351	struct witness *w;
1352	struct thread *td;
1353
1354	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1355	    panicstr != NULL)
1356		return;
1357	w = lock->lo_witness;
1358	td = curthread;
1359	file = fixup_filename(file);
1360
1361	/* Determine lock list for this lock. */
1362	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1363		lock_list = &td->td_sleeplocks;
1364	else
1365		lock_list = PCPU_PTR(spinlocks);
1366
1367	/* Check to see if we are recursing on a lock we already own. */
1368	instance = find_instance(*lock_list, lock);
1369	if (instance != NULL) {
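		/* The recursion count is kept in the LI_RECURSEMASK bits. */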
1370		instance->li_flags++;
1371		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1372		    td->td_proc->p_pid, lock->lo_name,
1373		    instance->li_flags & LI_RECURSEMASK);
1374		instance->li_file = file;
1375		instance->li_line = line;
1376		return;
1377	}
1378
1379	/* Update the per-witness last-acquired file and line. */
1380	w->w_file = file;
1381	w->w_line = line;
1382
1383	/* Find the next open lock instance in the list and fill it. */
1384	lle = *lock_list;
1385	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1386		lle = witness_lock_list_get();
1387		if (lle == NULL)
1388			return;
1389		lle->ll_next = *lock_list;
1390		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1391		    td->td_proc->p_pid, lle);
1392		*lock_list = lle;
1393	}
1394	instance = &lle->ll_children[lle->ll_count++];
1395	instance->li_lock = lock;
1396	instance->li_line = line;
1397	instance->li_file = file;
1398	if ((flags & LOP_EXCLUSIVE) != 0)
1399		instance->li_flags = LI_EXCLUSIVE;
1400	else
1401		instance->li_flags = 0;
1402	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1403	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1404}
1405
1406void
1407witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1408{
1409	struct lock_instance *instance;
1410	struct lock_class *class;
1411
1412	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1413	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1414		return;
1415	class = LOCK_CLASS(lock);
1416	file = fixup_filename(file);
1417	if (witness_watch) {
1418		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1419			panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1420			    class->lc_name, lock->lo_name, file, line);
1421		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1422			panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1423			    class->lc_name, lock->lo_name, file, line);
1424	}
1425	instance = find_instance(curthread->td_sleeplocks, lock);
1426	if (instance == NULL)
1427		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1428		    class->lc_name, lock->lo_name, file, line);
1429	if (witness_watch) {
1430		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1431			panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1432			    class->lc_name, lock->lo_name, file, line);
1433		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1434			panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1435			    class->lc_name, lock->lo_name,
1436			    instance->li_flags & LI_RECURSEMASK, file, line);
1437	}
1438	instance->li_flags |= LI_EXCLUSIVE;
1439}
1440
1441void
1442witness_downgrade(struct lock_object *lock, int flags, const char *file,
1443    int line)
1444{
1445	struct lock_instance *instance;
1446	struct lock_class *class;
1447
1448	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1449	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1450		return;
1451	class = LOCK_CLASS(lock);
1452	file = fixup_filename(file);
1453	if (witness_watch) {
1454		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1455		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1456			    class->lc_name, lock->lo_name, file, line);
1457		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1458			panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1459			    class->lc_name, lock->lo_name, file, line);
1460	}
1461	instance = find_instance(curthread->td_sleeplocks, lock);
1462	if (instance == NULL)
1463		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1464		    class->lc_name, lock->lo_name, file, line);
1465	if (witness_watch) {
1466		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1467			panic("downgrade of shared lock (%s) %s @ %s:%d",
1468			    class->lc_name, lock->lo_name, file, line);
1469		if ((instance->li_flags & LI_RECURSEMASK) != 0)
1470			panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1471			    class->lc_name, lock->lo_name,
1472			    instance->li_flags & LI_RECURSEMASK, file, line);
1473	}
1474	instance->li_flags &= ~LI_EXCLUSIVE;
1475}
1476
1477void
1478witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1479{
1480	struct lock_list_entry **lock_list, *lle;
1481	struct lock_instance *instance;
1482	struct lock_class *class;
1483	struct thread *td;
1484	register_t s;
1485	int i, j;
1486
1487	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1488		return;
1489	td = curthread;
1490	class = LOCK_CLASS(lock);
1491	file = fixup_filename(file);
1492
1493	/* Find lock instance associated with this lock. */
1494	if (class->lc_flags & LC_SLEEPLOCK)
1495		lock_list = &td->td_sleeplocks;
1496	else
1497		lock_list = PCPU_PTR(spinlocks);
1498	lle = *lock_list;
1499	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1500		for (i = 0; i < (*lock_list)->ll_count; i++) {
1501			instance = &(*lock_list)->ll_children[i];
1502			if (instance->li_lock == lock)
1503				goto found;
1504		}
1505
1506	/*
1507	 * When disabling WITNESS through witness_watch we could end up with
1508	 * registered locks still in the td_sleeplocks queue.
1509	 * We have to make sure we flush these queues, so search for any such
1510	 * registered locks and remove them.
1511	 */
1512	if (witness_watch > 0)
1513		panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1514		    lock->lo_name, file, line);
1515	else
1516		return;
1517found:
1518
1519	/* First, check for shared/exclusive mismatches. */
1520	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1521	    (flags & LOP_EXCLUSIVE) == 0) {
1522		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1523		    lock->lo_name, file, line);
1524		printf("while exclusively locked from %s:%d\n",
1525		    instance->li_file, instance->li_line);
1526		panic("excl->ushare");
1527	}
1528	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1529	    (flags & LOP_EXCLUSIVE) != 0) {
1530		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1531		    lock->lo_name, file, line);
1532		printf("while share locked from %s:%d\n", instance->li_file,
1533		    instance->li_line);
1534		panic("share->uexcl");
1535	}
1536	/* If we are recursed, unrecurse. */
1537	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1538		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1539		    td->td_proc->p_pid, instance->li_lock->lo_name,
1540		    instance->li_flags);
1541		instance->li_flags--;
1542		return;
1543	}
1544	/* The lock is now being dropped; check for the NORELEASE flag. */
1545	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1546		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1547		    lock->lo_name, file, line);
1548		panic("lock marked norelease");
1549	}
1550
1551	/* Otherwise, remove this item from the list. */
1552	s = intr_disable();
1553	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1554	    td->td_proc->p_pid, instance->li_lock->lo_name,
1555	    (*lock_list)->ll_count - 1);
1556	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1557		(*lock_list)->ll_children[j] =
1558		    (*lock_list)->ll_children[j + 1];
1559	(*lock_list)->ll_count--;
1560	intr_restore(s);
1561
1562	/*
1563	 * In order to reduce contention on w_mtx, we want to always keep a
1564	 * head object in the list so that frequent allocation from the
1565	 * free witness pool (and subsequent locking) is avoided.
1566	 * To keep the code simple, an empty head is preserved only when it
1567	 * is the last object in the list; otherwise the empty entry is
1568	 * unlinked and freed, and ownership of the list is handed over to
1569	 * the next object.
1570	 */
1571	if ((*lock_list)->ll_count == 0) {
1572		if (*lock_list == lle) {
1573			if (lle->ll_next == NULL)
1574				return;
1575		} else
1576			lle = *lock_list;
1577		*lock_list = lle->ll_next;
1578		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1579		    td->td_proc->p_pid, lle);
1580		witness_lock_list_free(lle);
1581	}
1582}
1583
1584void
1585witness_thread_exit(struct thread *td)
1586{
1587	struct lock_list_entry *lle;
1588	int i, n;
1589
1590	lle = td->td_sleeplocks;
1591	if (lle == NULL || panicstr != NULL)
1592		return;
1593	if (lle->ll_count != 0) {
1594		for (n = 0; lle != NULL; lle = lle->ll_next)
1595			for (i = lle->ll_count - 1; i >= 0; i--) {
1596				if (n == 0)
1597		printf("Thread %p exiting with the following locks held:\n",
1598					    td);
1599				n++;
1600				witness_list_lock(&lle->ll_children[i]);
1601
1602			}
1603		panic("Thread %p cannot exit while holding sleeplocks\n", td);
1604	}
1605	witness_lock_list_free(lle);
1606}
1607
1608/*
1609 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1610 * exempt Giant and sleepable locks from the checks as well.  If any
1611 * non-exempt locks are held, then a supplied message is printed to the
1612 * console along with a list of the offending locks.  If indicated in the
1613 * flags then a failure results in a panic as well.
1614 */
1615int
1616witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1617{
1618	struct lock_list_entry *lock_list, *lle;
1619	struct lock_instance *lock1;
1620	struct thread *td;
1621	va_list ap;
1622	int i, n;
1623
1624	if (witness_cold || witness_watch < 1 || panicstr != NULL)
1625		return (0);
1626	n = 0;
1627	td = curthread;
1628	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1629		for (i = lle->ll_count - 1; i >= 0; i--) {
1630			lock1 = &lle->ll_children[i];
1631			if (lock1->li_lock == lock)
1632				continue;
1633			if (flags & WARN_GIANTOK &&
1634			    lock1->li_lock == &Giant.lock_object)
1635				continue;
1636			if (flags & WARN_SLEEPOK &&
1637			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1638				continue;
1639			if (n == 0) {
1640				va_start(ap, fmt);
1641				vprintf(fmt, ap);
1642				va_end(ap);
1643				printf(" with the following");
1644				if (flags & WARN_SLEEPOK)
1645					printf(" non-sleepable");
1646				printf(" locks held:\n");
1647			}
1648			n++;
1649			witness_list_lock(lock1);
1650		}
1651
1652	/*
1653	 * Pin the thread in order to avoid problems with thread migration.
1654	 * Once all the checks on spinlock ownership have passed, the
1655	 * thread is on a safe path and can be unpinned.
1656	 */
1657	sched_pin();
1658	lock_list = PCPU_GET(spinlocks);
1659	if (lock_list != NULL && lock_list->ll_count != 0) {
1660		sched_unpin();
1661
1662		/*
1663		 * We should only have one spinlock and, since the
1664		 * exemption flags cannot apply to this lock class,
1665		 * just check whether the first spinlock is the one
1666		 * curthread should hold.
1667		 */
1668		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1669		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1670		    lock1->li_lock == lock && n == 0)
1671			return (0);
1672
1673		va_start(ap, fmt);
1674		vprintf(fmt, ap);
1675		va_end(ap);
1676		printf(" with the following");
1677		if (flags & WARN_SLEEPOK)
1678			printf(" non-sleepable");
1679		printf(" locks held:\n");
1680		n += witness_list_locks(&lock_list);
1681	} else
1682		sched_unpin();
1683	if (flags & WARN_PANIC && n)
1684		panic("%s", __func__);
1685	else
1686		witness_debugger(n);
1687	return (n);
1688}
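
/*
 * A sketch of typical use: callers normally reach this function through
 * the WITNESS_WARN() macro from sys/lock.h, which expands to a
 * witness_warn() call when the kernel is built with WITNESS and to
 * nothing otherwise.  The message below is only an illustration:
 *
 *	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "example: about to sleep");
 */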
1689
1690const char *
1691witness_file(struct lock_object *lock)
1692{
1693	struct witness *w;
1694
1695	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1696		return ("?");
1697	w = lock->lo_witness;
1698	return (w->w_file);
1699}
1700
1701int
1702witness_line(struct lock_object *lock)
1703{
1704	struct witness *w;
1705
1706	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1707		return (0);
1708	w = lock->lo_witness;
1709	return (w->w_line);
1710}
1711
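/*
 * Look up the witness for a lock name, creating a new one if this is the
 * first lock with that name.  An existing witness simply has its reference
 * count bumped, and a mismatch between its recorded lock class and the
 * caller's class is a fatal error.  Returns NULL if witness is disabled,
 * spin locks are being skipped, or no witness objects remain.
 */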
1712static struct witness *
1713enroll(const char *description, struct lock_class *lock_class)
1714{
1715	struct witness *w;
1716	struct witness_list *typelist;
1717
1718	MPASS(description != NULL);
1719
1720	if (witness_watch == -1 || panicstr != NULL)
1721		return (NULL);
1722	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1723		if (witness_skipspin)
1724			return (NULL);
1725		else
1726			typelist = &w_spin;
1727	} else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1728		typelist = &w_sleep;
1729	else
1730		panic("lock class %s is not sleep or spin",
1731		    lock_class->lc_name);
1732
1733	mtx_lock_spin(&w_mtx);
1734	w = witness_hash_get(description);
1735	if (w)
1736		goto found;
1737	if ((w = witness_get()) == NULL)
1738		return (NULL);
1739	MPASS(strlen(description) < MAX_W_NAME);
1740	strcpy(w->w_name, description);
1741	w->w_class = lock_class;
1742	w->w_refcount = 1;
1743	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1744	if (lock_class->lc_flags & LC_SPINLOCK) {
1745		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1746		w_spin_cnt++;
1747	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1748		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1749		w_sleep_cnt++;
1750	}
1751
1752	/* Insert new witness into the hash */
1753	witness_hash_put(w);
1754	witness_increment_graph_generation();
1755	mtx_unlock_spin(&w_mtx);
1756	return (w);
1757found:
1758	w->w_refcount++;
1759	mtx_unlock_spin(&w_mtx);
1760	if (lock_class != w->w_class)
1761		panic(
1762			"lock (%s) %s does not match earlier (%s) lock",
1763			description, lock_class->lc_name,
1764			w->w_class->lc_name);
1765	return (w);
1766}
1767
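/*
 * Called once the last reference to a witness has been dropped.  The
 * per-class lock count is decremented and the cached file/line are cleared,
 * since they may point into a loadable module that is going away; the
 * witness itself stays in the lists and the relationship matrix.  The graph
 * generation is bumped so readers notice the change.
 */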
1768static void
1769depart(struct witness *w)
1770{
1771	struct witness_list *list;
1772
1773	MPASS(w->w_refcount == 0);
1774	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1775		list = &w_sleep;
1776		w_sleep_cnt--;
1777	} else {
1778		list = &w_spin;
1779		w_spin_cnt--;
1780	}
1781	/*
1782	 * Set file to NULL as it may point into a loadable module.
1783	 */
1784	w->w_file = NULL;
1785	w->w_line = 0;
1786	witness_increment_graph_generation();
1787}
1788
1789
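/*
 * Record that 'parent' is a direct parent of 'child' in w_rmatrix and then
 * propagate the relationship transitively: every ancestor of the parent
 * becomes an ancestor of every descendant of the child.  The generation
 * count is bumped so that readers of the graph can detect the change.
 */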
1790static void
1791adopt(struct witness *parent, struct witness *child)
1792{
1793	int pi, ci, i, j;
1794
1795	if (witness_cold == 0)
1796		mtx_assert(&w_mtx, MA_OWNED);
1797
1798	/* If the relationship is already known, there's no work to be done. */
1799	if (isitmychild(parent, child))
1800		return;
1801
1802	/* When the structure of the graph changes, bump up the generation. */
1803	witness_increment_graph_generation();
1804
1805	/*
1806	 * The hard part ... create the direct relationship, then propagate all
1807	 * indirect relationships.
1808	 */
1809	pi = parent->w_index;
1810	ci = child->w_index;
1811	WITNESS_INDEX_ASSERT(pi);
1812	WITNESS_INDEX_ASSERT(ci);
1813	MPASS(pi != ci);
1814	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1815	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1816
1817	/*
1818	 * If parent was not already an ancestor of child,
1819	 * then we increment the descendant and ancestor counters.
1820	 */
1821	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1822		parent->w_num_descendants++;
1823		child->w_num_ancestors++;
1824	}
1825
1826	/*
1827	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1828	 * an ancestor of 'pi' during this loop.
1829	 */
1830	for (i = 1; i <= w_max_used_index; i++) {
1831		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1832		    (i != pi))
1833			continue;
1834
1835		/* Find each descendant of 'i' and mark it as a descendant. */
1836		for (j = 1; j <= w_max_used_index; j++) {
1837
1838			/*
1839			 * Skip children that are already marked as
1840			 * descendants of 'i'.
1841			 */
1842			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1843				continue;
1844
1845			/*
1846			 * We are only interested in descendants of 'ci'. Note
1847			 * that 'ci' itself is counted as a descendant of 'ci'.
1848			 */
1849			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1850			    (j != ci))
1851				continue;
1852			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1853			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1854			w_data[i].w_num_descendants++;
1855			w_data[j].w_num_ancestors++;
1856
1857			/*
1858			 * Make sure we aren't marking a node as both an
1859			 * ancestor and descendant. We should have caught
1860			 * this as a lock order reversal earlier.
1861			 */
1862			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1863			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1864				printf("witness rmatrix paradox! [%d][%d]=%d "
1865				    "both ancestor and descendant\n",
1866				    i, j, w_rmatrix[i][j]);
1867				kdb_backtrace();
1868				printf("Witness disabled.\n");
1869				witness_watch = -1;
1870			}
1871			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1872			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1873				printf("witness rmatrix paradox! [%d][%d]=%d "
1874				    "both ancestor and descendant\n",
1875				    j, i, w_rmatrix[j][i]);
1876				kdb_backtrace();
1877				printf("Witness disabled.\n");
1878				witness_watch = -1;
1879			}
1880		}
1881	}
1882}
1883
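/*
 * Establish the lock order "parent before child".  Both witnesses must be
 * of the same lock type (sleep vs. spin); otherwise this is a fatal error.
 */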
1884static void
1885itismychild(struct witness *parent, struct witness *child)
1886{
1887
1888	MPASS(child != NULL && parent != NULL);
1889	if (witness_cold == 0)
1890		mtx_assert(&w_mtx, MA_OWNED);
1891
1892	if (!witness_lock_type_equal(parent, child)) {
1893		if (witness_cold == 0)
1894			mtx_unlock_spin(&w_mtx);
1895		panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1896		    "the same lock type", __func__, parent->w_name,
1897		    parent->w_class->lc_name, child->w_name,
1898		    child->w_class->lc_name);
1899	}
1900	adopt(parent, child);
1901}
1902
1903/*
1904 * Generic code for the isitmy*() functions. The rmask parameter is the
1905 * expected relationship of w1 to w2.
1906 */
1907static int
1908_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1909{
1910	unsigned char r1, r2;
1911	int i1, i2;
1912
1913	i1 = w1->w_index;
1914	i2 = w2->w_index;
1915	WITNESS_INDEX_ASSERT(i1);
1916	WITNESS_INDEX_ASSERT(i2);
1917	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1918	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1919
1920	/* The flags on one better be the inverse of the flags on the other */
1921	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1922		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1923		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1924		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1925		    "w_rmatrix[%d][%d] == %hhx\n",
1926		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1927		    i2, i1, r2);
1928		kdb_backtrace();
1929		printf("Witness disabled.\n");
1930		witness_watch = -1;
1931	}
1932	return (r1 & rmask);
1933}
1934
1935/*
1936 * Checks if @child is a direct child of @parent.
1937 */
1938static int
1939isitmychild(struct witness *parent, struct witness *child)
1940{
1941
1942	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1943}
1944
1945/*
1946 * Checks if @descendant is a direct or indirect descendant of @ancestor.
1947 */
1948static int
1949isitmydescendant(struct witness *ancestor, struct witness *descendant)
1950{
1951
1952	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1953	    __func__));
1954}
1955
1956#ifdef BLESSING
1957static int
1958blessed(struct witness *w1, struct witness *w2)
1959{
1960	int i;
1961	struct witness_blessed *b;
1962
1963	for (i = 0; i < blessed_count; i++) {
1964		b = &blessed_list[i];
1965		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1966			if (strcmp(w2->w_name, b->b_lock2) == 0)
1967				return (1);
1968			continue;
1969		}
1970		if (strcmp(w1->w_name, b->b_lock2) == 0)
1971			if (strcmp(w2->w_name, b->b_lock1) == 0)
1972				return (1);
1973	}
1974	return (0);
1975}
1976#endif
1977
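/*
 * Hand out a witness object from the preallocated free list.  If the list
 * is empty, witness is permanently disabled (witness_watch is set to -1)
 * and NULL is returned.  On success w_mtx remains held; on failure it is
 * dropped.
 */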
1978static struct witness *
1979witness_get(void)
1980{
1981	struct witness *w;
1982	int index;
1983
1984	if (witness_cold == 0)
1985		mtx_assert(&w_mtx, MA_OWNED);
1986
1987	if (witness_watch == -1) {
1988		mtx_unlock_spin(&w_mtx);
1989		return (NULL);
1990	}
1991	if (STAILQ_EMPTY(&w_free)) {
1992		witness_watch = -1;
1993		mtx_unlock_spin(&w_mtx);
1994		printf("WITNESS: unable to allocate a new witness object\n");
1995		return (NULL);
1996	}
1997	w = STAILQ_FIRST(&w_free);
1998	STAILQ_REMOVE_HEAD(&w_free, w_list);
1999	w_free_cnt--;
2000	index = w->w_index;
2001	MPASS(index > 0 && index == w_max_used_index+1 &&
2002	    index < WITNESS_COUNT);
2003	bzero(w, sizeof(*w));
2004	w->w_index = index;
2005	if (index > w_max_used_index)
2006		w_max_used_index = index;
2007	return (w);
2008}
2009
2010static void
2011witness_free(struct witness *w)
2012{
2013
2014	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2015	w_free_cnt++;
2016}
2017
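/*
 * Lock list entries are never allocated at run time; they come from a
 * preallocated pool threaded through w_lock_list_free.  Running out of
 * entries permanently disables witness.
 */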
2018static struct lock_list_entry *
2019witness_lock_list_get(void)
2020{
2021	struct lock_list_entry *lle;
2022
2023	if (witness_watch == -1)
2024		return (NULL);
2025	mtx_lock_spin(&w_mtx);
2026	lle = w_lock_list_free;
2027	if (lle == NULL) {
2028		witness_watch = -1;
2029		mtx_unlock_spin(&w_mtx);
2030		printf("%s: witness exhausted\n", __func__);
2031		return (NULL);
2032	}
2033	w_lock_list_free = lle->ll_next;
2034	mtx_unlock_spin(&w_mtx);
2035	bzero(lle, sizeof(*lle));
2036	return (lle);
2037}
2038
2039static void
2040witness_lock_list_free(struct lock_list_entry *lle)
2041{
2042
2043	mtx_lock_spin(&w_mtx);
2044	lle->ll_next = w_lock_list_free;
2045	w_lock_list_free = lle;
2046	mtx_unlock_spin(&w_mtx);
2047}
2048
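/*
 * Walk a lock list looking for the instance of the given lock object;
 * returns NULL if the lock is not held on that list.
 */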
2049static struct lock_instance *
2050find_instance(struct lock_list_entry *list, struct lock_object *lock)
2051{
2052	struct lock_list_entry *lle;
2053	struct lock_instance *instance;
2054	int i;
2055
2056	for (lle = list; lle != NULL; lle = lle->ll_next)
2057		for (i = lle->ll_count - 1; i >= 0; i--) {
2058			instance = &lle->ll_children[i];
2059			if (instance->li_lock == lock)
2060				return (instance);
2061		}
2062	return (NULL);
2063}
2064
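/*
 * Print a single held lock instance: shared/exclusive state, lock class and
 * name, the witness name if it differs, the recursion count, and the
 * file:line where the lock was acquired.
 */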
2065static void
2066witness_list_lock(struct lock_instance *instance)
2067{
2068	struct lock_object *lock;
2069
2070	lock = instance->li_lock;
2071	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2072	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2073	if (lock->lo_witness->w_name != lock->lo_name)
2074		printf(" (%s)", lock->lo_witness->w_name);
2075	printf(" r = %d (%p) locked @ %s:%d\n",
2076	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
2077	    instance->li_line);
2078}
2079
2080#ifdef DDB
2081static int
2082witness_thread_has_locks(struct thread *td)
2083{
2084
2085	if (td->td_sleeplocks == NULL)
2086		return (0);
2087	return (td->td_sleeplocks->ll_count != 0);
2088}
2089
2090static int
2091witness_proc_has_locks(struct proc *p)
2092{
2093	struct thread *td;
2094
2095	FOREACH_THREAD_IN_PROC(p, td) {
2096		if (witness_thread_has_locks(td))
2097			return (1);
2098	}
2099	return (0);
2100}
2101#endif
2102
2103int
2104witness_list_locks(struct lock_list_entry **lock_list)
2105{
2106	struct lock_list_entry *lle;
2107	int i, nheld;
2108
2109	nheld = 0;
2110	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2111		for (i = lle->ll_count - 1; i >= 0; i--) {
2112			witness_list_lock(&lle->ll_children[i]);
2113			nheld++;
2114		}
2115	return (nheld);
2116}
2117
2118/*
2119 * This is a bit risky at best.  We call this function when we have timed
2120 * out acquiring a spin lock, and we assume that the other CPU is stuck
2121 * with this lock held.  So, we go groveling around in the other CPU's
2122 * per-cpu data to try to find the lock instance for this spin lock to
2123 * see when it was last acquired.
2124 */
2125void
2126witness_display_spinlock(struct lock_object *lock, struct thread *owner)
2127{
2128	struct lock_instance *instance;
2129	struct pcpu *pc;
2130
2131	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2132		return;
2133	pc = pcpu_find(owner->td_oncpu);
2134	instance = find_instance(pc->pc_spinlocks, lock);
2135	if (instance != NULL)
2136		witness_list_lock(instance);
2137}
2138
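/*
 * witness_save() and witness_restore() below stash and later reinstate the
 * file and line recorded for a held lock instance, so that code which
 * temporarily drops and re-acquires a lock can keep the original
 * acquisition point in the witness bookkeeping.
 */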
2139void
2140witness_save(struct lock_object *lock, const char **filep, int *linep)
2141{
2142	struct lock_list_entry *lock_list;
2143	struct lock_instance *instance;
2144	struct lock_class *class;
2145
2146	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2147	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2148		return;
2149	class = LOCK_CLASS(lock);
2150	if (class->lc_flags & LC_SLEEPLOCK)
2151		lock_list = curthread->td_sleeplocks;
2152	else {
2153		if (witness_skipspin)
2154			return;
2155		lock_list = PCPU_GET(spinlocks);
2156	}
2157	instance = find_instance(lock_list, lock);
2158	if (instance == NULL)
2159		panic("%s: lock (%s) %s not locked", __func__,
2160		    class->lc_name, lock->lo_name);
2161	*filep = instance->li_file;
2162	*linep = instance->li_line;
2163}
2164
2165void
2166witness_restore(struct lock_object *lock, const char *file, int line)
2167{
2168	struct lock_list_entry *lock_list;
2169	struct lock_instance *instance;
2170	struct lock_class *class;
2171
2172	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2173	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2174		return;
2175	class = LOCK_CLASS(lock);
2176	if (class->lc_flags & LC_SLEEPLOCK)
2177		lock_list = curthread->td_sleeplocks;
2178	else {
2179		if (witness_skipspin)
2180			return;
2181		lock_list = PCPU_GET(spinlocks);
2182	}
2183	instance = find_instance(lock_list, lock);
2184	if (instance == NULL)
2185		panic("%s: lock (%s) %s not locked", __func__,
2186		    class->lc_name, lock->lo_name);
2187	lock->lo_witness->w_file = file;
2188	lock->lo_witness->w_line = line;
2189	instance->li_file = file;
2190	instance->li_line = line;
2191}
2192
2193void
2194witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2195{
2196#ifdef INVARIANT_SUPPORT
2197	struct lock_instance *instance;
2198	struct lock_class *class;
2199
2200	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2201		return;
2202	class = LOCK_CLASS(lock);
2203	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2204		instance = find_instance(curthread->td_sleeplocks, lock);
2205	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2206		instance = find_instance(PCPU_GET(spinlocks), lock);
2207	else {
2208		panic("Lock (%s) %s is not sleep or spin!",
2209		    class->lc_name, lock->lo_name);
2210	}
2211	file = fixup_filename(file);
2212	switch (flags) {
2213	case LA_UNLOCKED:
2214		if (instance != NULL)
2215			panic("Lock (%s) %s locked @ %s:%d.",
2216			    class->lc_name, lock->lo_name, file, line);
2217		break;
2218	case LA_LOCKED:
2219	case LA_LOCKED | LA_RECURSED:
2220	case LA_LOCKED | LA_NOTRECURSED:
2221	case LA_SLOCKED:
2222	case LA_SLOCKED | LA_RECURSED:
2223	case LA_SLOCKED | LA_NOTRECURSED:
2224	case LA_XLOCKED:
2225	case LA_XLOCKED | LA_RECURSED:
2226	case LA_XLOCKED | LA_NOTRECURSED:
2227		if (instance == NULL) {
2228			panic("Lock (%s) %s not locked @ %s:%d.",
2229			    class->lc_name, lock->lo_name, file, line);
2230			break;
2231		}
2232		if ((flags & LA_XLOCKED) != 0 &&
2233		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2234			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2235			    class->lc_name, lock->lo_name, file, line);
2236		if ((flags & LA_SLOCKED) != 0 &&
2237		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2238			panic("Lock (%s) %s exclusively locked @ %s:%d.",
2239			    class->lc_name, lock->lo_name, file, line);
2240		if ((flags & LA_RECURSED) != 0 &&
2241		    (instance->li_flags & LI_RECURSEMASK) == 0)
2242			panic("Lock (%s) %s not recursed @ %s:%d.",
2243			    class->lc_name, lock->lo_name, file, line);
2244		if ((flags & LA_NOTRECURSED) != 0 &&
2245		    (instance->li_flags & LI_RECURSEMASK) != 0)
2246			panic("Lock (%s) %s recursed @ %s:%d.",
2247			    class->lc_name, lock->lo_name, file, line);
2248		break;
2249	default:
2250		panic("Invalid lock assertion at %s:%d.", file, line);
2251
2252	}
2253#endif	/* INVARIANT_SUPPORT */
2254}
2255
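/*
 * Set or clear a per-instance flag (such as LI_NORELEASE) on a lock the
 * current thread holds; it is a fatal error if the lock is not held.
 */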
2256static void
2257witness_setflag(struct lock_object *lock, int flag, int set)
2258{
2259	struct lock_list_entry *lock_list;
2260	struct lock_instance *instance;
2261	struct lock_class *class;
2262
2263	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2264		return;
2265	class = LOCK_CLASS(lock);
2266	if (class->lc_flags & LC_SLEEPLOCK)
2267		lock_list = curthread->td_sleeplocks;
2268	else {
2269		if (witness_skipspin)
2270			return;
2271		lock_list = PCPU_GET(spinlocks);
2272	}
2273	instance = find_instance(lock_list, lock);
2274	if (instance == NULL)
2275		panic("%s: lock (%s) %s not locked", __func__,
2276		    class->lc_name, lock->lo_name);
2277
2278	if (set)
2279		instance->li_flags |= flag;
2280	else
2281		instance->li_flags &= ~flag;
2282}
2283
2284void
2285witness_norelease(struct lock_object *lock)
2286{
2287
2288	witness_setflag(lock, LI_NORELEASE, 1);
2289}
2290
2291void
2292witness_releaseok(struct lock_object *lock)
2293{
2294
2295	witness_setflag(lock, LI_NORELEASE, 0);
2296}
2297
2298#ifdef DDB
2299static void
2300witness_ddb_list(struct thread *td)
2301{
2302
2303	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2304	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2305
2306	if (witness_watch < 1)
2307		return;
2308
2309	witness_list_locks(&td->td_sleeplocks);
2310
2311	/*
2312	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2313	 * if td is currently executing on some other CPU and holds spin locks
2314	 * as we won't display those locks.  If we had a MI way of getting
2315	 * the per-cpu data for a given cpu then we could use
2316	 * td->td_oncpu to get the list of spinlocks for this thread
2317	 * and "fix" this.
2318	 *
2319	 * That still wouldn't really fix this unless we locked the scheduler
2320	 * lock or stopped the other CPU to make sure it wasn't changing the
2321	 * list out from under us.  It is probably best to just not try to
2322	 * handle threads on other CPU's for now.
2323	 */
2324	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2325		witness_list_locks(PCPU_PTR(spinlocks));
2326}
2327
2328DB_SHOW_COMMAND(locks, db_witness_list)
2329{
2330	struct thread *td;
2331
2332	if (have_addr)
2333		td = db_lookup_thread(addr, TRUE);
2334	else
2335		td = kdb_thread;
2336	witness_ddb_list(td);
2337}
2338
2339DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2340{
2341	struct thread *td;
2342	struct proc *p;
2343
2344	/*
2345	 * It would be nice to list only threads and processes that actually
2346	 * hold sleep locks, but that information is currently not exported
2347	 * by WITNESS.
2348	 */
2349	FOREACH_PROC_IN_SYSTEM(p) {
2350		if (!witness_proc_has_locks(p))
2351			continue;
2352		FOREACH_THREAD_IN_PROC(p, td) {
2353			if (!witness_thread_has_locks(td))
2354				continue;
2355			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2356			    p->p_comm, td, td->td_tid);
2357			witness_ddb_list(td);
2358		}
2359	}
2360}
2361DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2362
2363DB_SHOW_COMMAND(witness, db_witness_display)
2364{
2365
2366	witness_ddb_display(db_printf);
2367}
2368#endif
2369
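/*
 * Sysctl handler that reports every lock order reversal recorded in
 * w_rmatrix, along with the stack traces captured when each of the two
 * conflicting orders was first seen.  Witness data is copied out from under
 * w_mtx before formatting, and the whole dump is restarted if the graph
 * generation changes underneath us.
 */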
2370static int
2371sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2372{
2373	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2374	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2375	struct sbuf *sb;
2376	u_int w_rmatrix1, w_rmatrix2;
2377	int error, generation, i, j;
2378
2379	tmp_data1 = NULL;
2380	tmp_data2 = NULL;
2381	tmp_w1 = NULL;
2382	tmp_w2 = NULL;
2383	if (witness_watch < 1) {
2384		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2385		return (error);
2386	}
2387	if (witness_cold) {
2388		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2389		return (error);
2390	}
2391	error = 0;
2392	sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2393	if (sb == NULL)
2394		return (ENOMEM);
2395
2396	/* Allocate and init temporary storage space. */
2397	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2398	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2399	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2400	    M_WAITOK | M_ZERO);
2401	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2402	    M_WAITOK | M_ZERO);
2403	stack_zero(&tmp_data1->wlod_stack);
2404	stack_zero(&tmp_data2->wlod_stack);
2405
2406restart:
2407	mtx_lock_spin(&w_mtx);
2408	generation = w_generation;
2409	mtx_unlock_spin(&w_mtx);
2410	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2411	    w_lohash.wloh_count);
2412	for (i = 1; i < w_max_used_index; i++) {
2413		mtx_lock_spin(&w_mtx);
2414		if (generation != w_generation) {
2415			mtx_unlock_spin(&w_mtx);
2416
2417			/* The graph has changed, try again. */
2418			req->oldidx = 0;
2419			sbuf_clear(sb);
2420			goto restart;
2421		}
2422
2423		w1 = &w_data[i];
2424		if (w1->w_reversed == 0) {
2425			mtx_unlock_spin(&w_mtx);
2426			continue;
2427		}
2428
2429		/* Copy w1 locally so we can release the spin lock. */
2430		*tmp_w1 = *w1;
2431		mtx_unlock_spin(&w_mtx);
2432
2433		if (tmp_w1->w_reversed == 0)
2434			continue;
2435		for (j = 1; j < w_max_used_index; j++) {
2436			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2437				continue;
2438
2439			mtx_lock_spin(&w_mtx);
2440			if (generation != w_generation) {
2441				mtx_unlock_spin(&w_mtx);
2442
2443				/* The graph has changed, try again. */
2444				req->oldidx = 0;
2445				sbuf_clear(sb);
2446				goto restart;
2447			}
2448
2449			w2 = &w_data[j];
2450			data1 = witness_lock_order_get(w1, w2);
2451			data2 = witness_lock_order_get(w2, w1);
2452
2453			/*
2454			 * Copy information locally so we can release the
2455			 * spin lock.
2456			 */
2457			*tmp_w2 = *w2;
2458			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2459			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2460
2461			if (data1) {
2462				stack_zero(&tmp_data1->wlod_stack);
2463				stack_copy(&data1->wlod_stack,
2464				    &tmp_data1->wlod_stack);
2465			}
2466			if (data2 && data2 != data1) {
2467				stack_zero(&tmp_data2->wlod_stack);
2468				stack_copy(&data2->wlod_stack,
2469				    &tmp_data2->wlod_stack);
2470			}
2471			mtx_unlock_spin(&w_mtx);
2472
2473			sbuf_printf(sb,
2474	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2475			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2476			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2477#if 0
2478 			sbuf_printf(sb,
2479			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2480 			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2481 			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2482#endif
2483			if (data1) {
2484				sbuf_printf(sb,
2485			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2486				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2487				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2488				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2489				sbuf_printf(sb, "\n");
2490			}
2491			if (data2 && data2 != data1) {
2492				sbuf_printf(sb,
2493			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2494				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2495				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2496				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2497				sbuf_printf(sb, "\n");
2498			}
2499		}
2500	}
2501	mtx_lock_spin(&w_mtx);
2502	if (generation != w_generation) {
2503		mtx_unlock_spin(&w_mtx);
2504
2505		/*
2506		 * The graph changed while we were printing stack data,
2507		 * try again.
2508		 */
2509		req->oldidx = 0;
2510		sbuf_clear(sb);
2511		goto restart;
2512	}
2513	mtx_unlock_spin(&w_mtx);
2514
2515	/* Free temporary storage space. */
2516	free(tmp_data1, M_TEMP);
2517	free(tmp_data2, M_TEMP);
2518	free(tmp_w1, M_TEMP);
2519	free(tmp_w2, M_TEMP);
2520
2521	sbuf_finish(sb);
2522	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2523	sbuf_delete(sb);
2524
2525	return (error);
2526}
2527
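/*
 * Sysctl handler that dumps the full lock order graph, one direct
 * parent/child edge per line as a pair of quoted witness names.
 */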
2528static int
2529sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2530{
2531	struct witness *w;
2532	struct sbuf *sb;
2533	int error;
2534
2535	if (witness_watch < 1) {
2536		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2537		return (error);
2538	}
2539	if (witness_cold) {
2540		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2541		return (error);
2542	}
2543	error = 0;
2544	sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
2545	if (sb == NULL)
2546		return (ENOMEM);
2547	sbuf_printf(sb, "\n");
2548
2549	mtx_lock_spin(&w_mtx);
2550	STAILQ_FOREACH(w, &w_all, w_list)
2551		w->w_displayed = 0;
2552	STAILQ_FOREACH(w, &w_all, w_list)
2553		witness_add_fullgraph(sb, w);
2554	mtx_unlock_spin(&w_mtx);
2555
2556	/*
2557	 * Since the sbuf was created with SBUF_FIXEDLEN, check whether it overflowed.
2558	 */
2559	if (sbuf_overflowed(sb)) {
2560		sbuf_delete(sb);
2561		panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
2562		    __func__);
2563	}
2564
2565	/*
2566	 * Close the sbuf and return to userland.
2567	 */
2568	sbuf_finish(sb);
2569	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2570	sbuf_delete(sb);
2571
2572	return (error);
2573}
2574
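/*
 * Sysctl handler for debug.witness.watch.  Only values between -1 and 1 are
 * accepted, and once witness has been turned off permanently (-1) it cannot
 * be re-enabled.
 */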
2575static int
2576sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2577{
2578	int error, value;
2579
2580	value = witness_watch;
2581	error = sysctl_handle_int(oidp, &value, 0, req);
2582	if (error != 0 || req->newptr == NULL)
2583		return (error);
2584	if (value > 1 || value < -1 ||
2585	    (witness_watch == -1 && value != witness_watch))
2586		return (EINVAL);
2587	witness_watch = value;
2588	return (0);
2589}
2590
2591static void
2592witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2593{
2594	int i;
2595
2596	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2597		return;
2598	w->w_displayed = 1;
2599
2600	WITNESS_INDEX_ASSERT(w->w_index);
2601	for (i = 1; i <= w_max_used_index; i++) {
2602		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2603			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2604			    w_data[i].w_name);
2605			witness_add_fullgraph(sb, &w_data[i]);
2606		}
2607	}
2608}
2609
2610/*
2611 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2612 * interprets the key as a string and reads until the null
2613 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2614 * hash value computed from the key.
2615 */
2616static uint32_t
2617witness_hash_djb2(const uint8_t *key, uint32_t size)
2618{
2619	unsigned int hash = 5381;
2620	int i;
2621
2622	/* hash = hash * 33 + key[i] */
2623	if (size)
2624		for (i = 0; i < size; i++)
2625			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2626	else
2627		for (i = 0; key[i] != 0; i++)
2628			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2629
2630	return (hash);
2631}
2632
2633
2634/*
2635 * Initializes the two witness hash tables. Called exactly once from
2636 * witness_initialize().
2637 */
2638static void
2639witness_init_hash_tables(void)
2640{
2641	int i;
2642
2643	MPASS(witness_cold);
2644
2645	/* Initialize the hash tables. */
2646	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2647		w_hash.wh_array[i] = NULL;
2648
2649	w_hash.wh_size = WITNESS_HASH_SIZE;
2650	w_hash.wh_count = 0;
2651
2652	/* Initialize the lock order data hash. */
2653	w_lofree = NULL;
2654	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2655		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2656		w_lodata[i].wlod_next = w_lofree;
2657		w_lofree = &w_lodata[i];
2658	}
2659	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2660	w_lohash.wloh_count = 0;
2661	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2662		w_lohash.wloh_array[i] = NULL;
2663}
2664
2665static struct witness *
2666witness_hash_get(const char *key)
2667{
2668	struct witness *w;
2669	uint32_t hash;
2670
2671	MPASS(key != NULL);
2672	if (witness_cold == 0)
2673		mtx_assert(&w_mtx, MA_OWNED);
2674	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2675	w = w_hash.wh_array[hash];
2676	while (w != NULL) {
2677		if (strcmp(w->w_name, key) == 0)
2678			goto out;
2679		w = w->w_hash_next;
2680	}
2681
2682out:
2683	return (w);
2684}
2685
2686static void
2687witness_hash_put(struct witness *w)
2688{
2689	uint32_t hash;
2690
2691	MPASS(w != NULL);
2692	MPASS(w->w_name != NULL);
2693	if (witness_cold == 0)
2694		mtx_assert(&w_mtx, MA_OWNED);
2695	KASSERT(witness_hash_get(w->w_name) == NULL,
2696	    ("%s: trying to add a hash entry that already exists!", __func__));
2697	KASSERT(w->w_hash_next == NULL,
2698	    ("%s: w->w_hash_next != NULL", __func__));
2699
2700	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2701	w->w_hash_next = w_hash.wh_array[hash];
2702	w_hash.wh_array[hash] = w;
2703	w_hash.wh_count++;
2704}
2705
2706
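/*
 * Return the recorded lock order data (including the first-seen stack
 * trace) for the ordered pair (parent, child), or NULL if that order has
 * never been recorded.
 */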
2707static struct witness_lock_order_data *
2708witness_lock_order_get(struct witness *parent, struct witness *child)
2709{
2710	struct witness_lock_order_data *data = NULL;
2711	struct witness_lock_order_key key;
2712	unsigned int hash;
2713
2714	MPASS(parent != NULL && child != NULL);
2715	key.from = parent->w_index;
2716	key.to = child->w_index;
2717	WITNESS_INDEX_ASSERT(key.from);
2718	WITNESS_INDEX_ASSERT(key.to);
2719	if ((w_rmatrix[parent->w_index][child->w_index]
2720	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2721		goto out;
2722
2723	hash = witness_hash_djb2((const char*)&key,
2724	    sizeof(key)) % w_lohash.wloh_size;
2725	data = w_lohash.wloh_array[hash];
2726	while (data != NULL) {
2727		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2728			break;
2729		data = data->wlod_next;
2730	}
2731
2732out:
2733	return (data);
2734}
2735
2736/*
2737 * Verify that parent and child have a known relationship, are not the same,
2738 * and child is actually a child of parent.  This is done without w_mtx
2739 * to avoid contention in the common case.
2740 */
2741static int
2742witness_lock_order_check(struct witness *parent, struct witness *child)
2743{
2744
2745	if (parent != child &&
2746	    w_rmatrix[parent->w_index][child->w_index]
2747	    & WITNESS_LOCK_ORDER_KNOWN &&
2748	    isitmychild(parent, child))
2749		return (1);
2750
2751	return (0);
2752}
2753
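/*
 * Record that the order parent -> child has now been seen, saving the
 * current stack trace in the lock order hash.  Returns 1 if the order was
 * already known or was recorded successfully, and 0 if the preallocated
 * pool of lock order entries has been exhausted.
 */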
2754static int
2755witness_lock_order_add(struct witness *parent, struct witness *child)
2756{
2757	struct witness_lock_order_data *data = NULL;
2758	struct witness_lock_order_key key;
2759	unsigned int hash;
2760
2761	MPASS(parent != NULL && child != NULL);
2762	key.from = parent->w_index;
2763	key.to = child->w_index;
2764	WITNESS_INDEX_ASSERT(key.from);
2765	WITNESS_INDEX_ASSERT(key.to);
2766	if (w_rmatrix[parent->w_index][child->w_index]
2767	    & WITNESS_LOCK_ORDER_KNOWN)
2768		return (1);
2769
2770	hash = witness_hash_djb2((const char*)&key,
2771	    sizeof(key)) % w_lohash.wloh_size;
2772	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2773	data = w_lofree;
2774	if (data == NULL)
2775		return (0);
2776	w_lofree = data->wlod_next;
2777	data->wlod_next = w_lohash.wloh_array[hash];
2778	data->wlod_key = key;
2779	w_lohash.wloh_array[hash] = data;
2780	w_lohash.wloh_count++;
2781	stack_zero(&data->wlod_stack);
2782	stack_save(&data->wlod_stack);
2783	return (1);
2784}
2785
2786/* Call this whenever the structure of the witness graph changes. */
2787static void
2788witness_increment_graph_generation(void)
2789{
2790
2791	if (witness_cold == 0)
2792		mtx_assert(&w_mtx, MA_OWNED);
2793	w_generation++;
2794}
2795
2796#ifdef KDB
2797static void
2798_witness_debugger(int cond, const char *msg)
2799{
2800
2801	if (witness_trace && cond)
2802		kdb_backtrace();
2803	if (witness_kdb && cond)
2804		kdb_enter(KDB_WHY_WITNESS, msg);
2805}
2806#endif
2807