/*-
 * Copyright (c) 2008 Isilon Systems, Inc.
 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
 * Copyright (c) 1998 Berkeley Software Design, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
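
/*
 * As a hedged illustration of rule 3) (not part of the verifier itself;
 * the lock name is hypothetical): both orderings below are acceptable for
 * a sleepable sx lock, because blocking on the sx lock while holding Giant
 * releases Giant, per rule 2), so no reversal can result.
 */
#if 0
static struct sx example_sx;

static void
giant_after_sleepable(void)
{
	sx_xlock(&example_sx);	/* sleepable lock first */
	mtx_lock(&Giant);	/* Giant after a sleepable lock: rule 3) */
	mtx_unlock(&Giant);
	sx_xunlock(&example_sx);
}

static void
giant_before_sleepable(void)
{
	mtx_lock(&Giant);
	sx_xlock(&example_sx);	/* blocking here releases Giant: rule 2) */
	sx_xunlock(&example_sx);
	mtx_unlock(&Giant);
}
#endif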

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 273342 2014-10-20 20:21:40Z markj $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_stack.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/stdarg.h>

#if !defined(DDB) && !defined(STACK)
#error "DDB or STACK options are required for WITNESS"
#endif

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

#define	LI_RECURSEMASK	0x0000ffff	/* Recursion depth of lock instance. */
#define	LI_EXCLUSIVE	0x00010000	/* Exclusive lock instance. */
#define	LI_NORELEASE	0x00020000	/* Lock not allowed to be released. */

/* Define this to check for blessed mutexes */
#undef BLESSING

#ifndef WITNESS_COUNT
#define	WITNESS_COUNT 		1536
#endif
#define	WITNESS_HASH_SIZE	251	/* Prime, gives load factor < 2 */
#define	WITNESS_PENDLIST	(1024 + MAXCPU)

/* Allocate 256 KB of stack data space */
#define	WITNESS_LO_DATA_COUNT	2048

/* Prime, gives load factor of ~2 at full load */
#define	WITNESS_LO_HASH_SIZE	1021

/*
 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
 * will hold LOCK_NCHILDREN locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define	LOCK_NCHILDREN	5
#define	LOCK_CHILDCOUNT	2048
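
/*
 * Taken together, the two constants above bound the number of held lock
 * instances witness can track at once: LOCK_CHILDCOUNT list entries of
 * LOCK_NCHILDREN instances each, i.e. 2048 * 5 = 10240 instances.
 */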

#define	MAX_W_NAME	64

#define	FULLGRAPH_SBUF_SIZE	512

/*
 * These flags go in the witness relationship matrix and describe the
 * relationship between any two struct witness objects.
 */
#define	WITNESS_UNRELATED        0x00    /* No lock order relation. */
#define	WITNESS_PARENT           0x01    /* Parent, aka direct ancestor. */
#define	WITNESS_ANCESTOR         0x02    /* Direct or indirect ancestor. */
#define	WITNESS_CHILD            0x04    /* Child, aka direct descendant. */
#define	WITNESS_DESCENDANT       0x08    /* Direct or indirect descendant. */
#define	WITNESS_ANCESTOR_MASK    (WITNESS_PARENT | WITNESS_ANCESTOR)
#define	WITNESS_DESCENDANT_MASK  (WITNESS_CHILD | WITNESS_DESCENDANT)
#define	WITNESS_RELATED_MASK						\
	(WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
#define	WITNESS_REVERSAL         0x10    /* A lock order reversal has been
					  * observed. */
#define	WITNESS_RESERVED1        0x20    /* Unused flag, reserved. */
#define	WITNESS_RESERVED2        0x40    /* Unused flag, reserved. */
#define	WITNESS_LOCK_ORDER_KNOWN 0x80    /* This lock order is known. */

/* Descendant to ancestor flags */
#define	WITNESS_DTOA(x)	(((x) & WITNESS_RELATED_MASK) >> 2)

/* Ancestor to descendant flags */
#define	WITNESS_ATOD(x)	(((x) & WITNESS_RELATED_MASK) << 2)
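
/*
 * For example, given the flag values above:
 *	WITNESS_ATOD(WITNESS_PARENT) == WITNESS_CHILD	(0x01 << 2 == 0x04)
 *	WITNESS_DTOA(WITNESS_CHILD)  == WITNESS_PARENT	(0x04 >> 2 == 0x01)
 * so the relationship bits seen from one side of a witness pair can be
 * derived from the other side's matrix entry by shifting.
 */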

#define	WITNESS_INDEX_ASSERT(i)						\
	MPASS((i) > 0 && (i) <= w_max_used_index && (i) < witness_count)

static MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");

/*
 * Lock instances.  A lock instance is the data associated with a lock while
 * it is held by witness.  For example, a lock instance will hold the
 * recursion count of a lock.  Lock instances are held in lists.  Spin locks
 * are held in a per-cpu list while sleep locks are held in a per-thread
 * list.
 */
struct lock_instance {
	struct lock_object	*li_lock;
	const char		*li_file;
	int			li_line;
	u_int			li_flags;
};

/*
 * A simple list type used to build the list of locks held by a thread
 * or CPU.  We can't simply embed the list in struct lock_object since a
 * lock may be held by more than one thread if it is a shared lock.  Locks
 * are added to the head of the list, so we fill up each list entry from
 * "the back" logically.  To ease some of the arithmetic, we actually fill
 * in each list entry the normal way (children[0] then children[1], etc.) but
 * when we traverse the list we read children[count-1] as the first entry
 * down to children[0] as the final entry.
 */
struct lock_list_entry {
	struct lock_list_entry	*ll_next;
	struct lock_instance	ll_children[LOCK_NCHILDREN];
	u_int			ll_count;
};
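
/*
 * A hedged traversal sketch (the helper name is hypothetical, not part of
 * this file): counting held instances newest-first, per the convention
 * described above.
 */
#if 0
static u_int
count_held(struct lock_list_entry *lle)
{
	u_int n;
	int i;

	for (n = 0; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--, n++)
			(void)lle->ll_children[i].li_lock; /* newest first */
	return (n);
}
#endif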

/*
 * The main witness structure. One of these per named lock type in the system
 * (for example, "vnode interlock").
 */
struct witness {
	char  			w_name[MAX_W_NAME];
	uint32_t 		w_index;  /* Index in the relationship matrix */
	struct lock_class	*w_class;
	STAILQ_ENTRY(witness) 	w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) 	w_typelist;	/* Witnesses of a type. */
	struct witness		*w_hash_next; /* Linked list in hash buckets. */
	const char		*w_file; /* File where last acquired */
	uint32_t 		w_line; /* Line where last acquired */
	uint32_t 		w_refcount;
	uint16_t 		w_num_ancestors; /* direct/indirect
						  * ancestor count */
	uint16_t 		w_num_descendants; /* direct/indirect
						    * descendant count */
	int16_t 		w_ddb_level;
	unsigned		w_displayed:1;
	unsigned		w_reversed:1;
};

STAILQ_HEAD(witness_list, witness);

/*
 * The witness hash table. Keys are witness names (const char *), elements are
 * witness objects (struct witness *).
 */
struct witness_hash {
	struct witness	*wh_array[WITNESS_HASH_SIZE];
	uint32_t	wh_size;
	uint32_t	wh_count;
};

/*
 * Key type for the lock order data hash table.
 */
struct witness_lock_order_key {
	uint16_t	from;
	uint16_t	to;
};

struct witness_lock_order_data {
	struct stack			wlod_stack;
	struct witness_lock_order_key	wlod_key;
	struct witness_lock_order_data	*wlod_next;
};

/*
 * The witness lock order data hash table. Keys are witness index tuples
 * (struct witness_lock_order_key), elements are lock order data objects
 * (struct witness_lock_order_data).
 */
struct witness_lock_order_hash {
	struct witness_lock_order_data	*wloh_array[WITNESS_LO_HASH_SIZE];
	u_int	wloh_size;
	u_int	wloh_count;
};
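
/*
 * A hedged lookup sketch for this table (the helper and its hash argument
 * are assumptions here); the chained-bucket walk mirrors what the
 * witness_lock_order_get()/witness_lock_order_check() routines declared
 * below do, using witness_lock_order_key_equal() to match keys.
 */
#if 0
static struct witness_lock_order_data *
wloh_lookup(const struct witness_lock_order_key *key, u_int hash)
{
	struct witness_lock_order_data *data;

	for (data = w_lohash.wloh_array[hash % w_lohash.wloh_size];
	    data != NULL; data = data->wlod_next)
		if (witness_lock_order_key_equal(&data->wlod_key, key))
			return (data);
	return (NULL);
}
#endif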

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_pendhelp {
	const char		*wh_type;
	struct lock_object	*wh_lock;
};

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

/*
 * Returns 0 if one of the locks is a spin lock and the other is not.
 * Returns 1 otherwise.
 */
static __inline int
witness_lock_type_equal(struct witness *w1, struct witness *w2)
{

	return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
		(w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
}

static __inline int
witness_lock_order_key_equal(const struct witness_lock_order_key *a,
    const struct witness_lock_order_key *b)
{

	return (a->from == b->from && a->to == b->to);
}

static int	_isitmyx(struct witness *w1, struct witness *w2, int rmask,
		    const char *fname);
#ifdef KDB
static void	_witness_debugger(int cond, const char *msg);
#endif
static void	adopt(struct witness *parent, struct witness *child);
#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static void	depart(struct witness *w);
static struct witness	*enroll(const char *description,
			    struct lock_class *lock_class);
static struct lock_instance	*find_instance(struct lock_list_entry *list,
				    const struct lock_object *lock);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static void	itismychild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static int	sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
static void	witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
#ifdef DDB
static void	witness_ddb_compute_levels(void);
static void	witness_ddb_display(int(*)(const char *fmt, ...));
static void	witness_ddb_display_descendants(int(*)(const char *fmt, ...),
		    struct witness *, int indent);
static void	witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
		    struct witness_list *list);
static void	witness_ddb_level_descendants(struct witness *parent, int l);
static void	witness_ddb_list(struct thread *td);
#endif
static void	witness_free(struct witness *m);
static struct witness	*witness_get(void);
static uint32_t	witness_hash_djb2(const uint8_t *key, uint32_t size);
static struct witness	*witness_hash_get(const char *key);
static void	witness_hash_put(struct witness *w);
static void	witness_init_hash_tables(void);
static void	witness_increment_graph_generation(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct lock_list_entry	*witness_lock_list_get(void);
static int	witness_lock_order_add(struct witness *parent,
		    struct witness *child);
static int	witness_lock_order_check(struct witness *parent,
		    struct witness *child);
static struct witness_lock_order_data	*witness_lock_order_get(
					    struct witness *parent,
					    struct witness *child);
static void	witness_list_lock(struct lock_instance *instance,
		    int (*prnt)(const char *fmt, ...));
static void	witness_setflag(struct lock_object *lock, int flag, int set);

#ifdef KDB
#define	witness_debugger(c)	_witness_debugger(c, __func__)
#else
#define	witness_debugger(c)
#endif

static SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL,
    "Witness Locking");

/*
 * If set to 0, lock order checking is disabled.  If set to -1,
 * witness is completely disabled.  Otherwise witness performs full
 * lock order checking for all locks.  At runtime, lock order checking
 * may be toggled.  However, witness cannot be reenabled once it is
 * completely disabled.
 */
static int witness_watch = 1;
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RWTUN | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is 1, it will cause the system
 * to drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RWTUN, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RWTUN, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin, 0, "");

int badstack_sbuf_size;

int witness_count = WITNESS_COUNT;
SYSCTL_INT(_debug_witness, OID_AUTO, witness_count, CTLFLAG_RDTUN,
    &witness_count, 0, "");

/*
 * Call this to print out the relations between locks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");

/*
 * Call this to print out the witness faulty stacks.
 */
SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");

static struct mtx w_mtx;

/* w_list */
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);

/* w_typelist */
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);

/* lock list */
static struct lock_list_entry *w_lock_list_free = NULL;
static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
static u_int pending_cnt;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");

static struct witness *w_data;
static uint8_t **w_rmatrix;
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
static struct witness_hash w_hash;	/* The witness hash table. */

/* The lock order data hash */
static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
static struct witness_lock_order_data *w_lofree = NULL;
static struct witness_lock_order_hash w_lohash;
static int w_max_used_index = 0;
static unsigned int w_generation = 0;
static const char w_notrunning[] = "Witness not running\n";
static const char w_stillcold[] = "Witness is still cold\n";


static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "allprison", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_rw },
#ifdef	HWPMC_HOOKS
	{ "pmc-sleep", &lock_class_mtx_sleep },
#endif
	{ "time lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_rw },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * IPv4 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * IPv6 multicast:
	 * protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_rw },
	{ "in6_multi_mtx", &lock_class_mtx_sleep },
	{ "mld_mtx", &lock_class_mtx_sleep },
	{ "if_addr_lock", &lock_class_rw },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp_link_rwlock", &lock_class_rw },
	{ "unp_list_lock", &lock_class_mtx_sleep },
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_rw },
	{ "udpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_rw },
	{ "tcpinp", &lock_class_rw },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_rw },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },

	/*
	 * IEEE 802.11
	 */
	{ "802.11 com lock", &lock_class_mtx_sleep},
	{ NULL, NULL },
	/*
	 * Network drivers
	 */
	{ "network driver", &lock_class_mtx_sleep},
	{ NULL, NULL },

	/*
	 * Netgraph
	 */
	{ "ng_node", &lock_class_mtx_sleep },
	{ "ng_worklist", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "vm map (system)", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * VM
	 */
	{ "vm map (user)", &lock_class_sx },
	{ "vm object", &lock_class_rw },
	{ "vm page", &lock_class_mtx_sleep },
	{ "vm page queue", &lock_class_mtx_sleep },
	{ "pmap pv global", &lock_class_rw },
	{ "pmap", &lock_class_mtx_sleep },
	{ "pmap pv list", &lock_class_rw },
	{ "vm page free queue", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * ZFS locking
	 */
	{ "dn->dn_mtx", &lock_class_sx },
	{ "dr->dt.di.dr_mtx", &lock_class_sx },
	{ "db->db_mtx", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
	{ "scrlock", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "pcib_mtx", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-per-proc", &lock_class_mtx_spin },
#endif
	{ "process slock", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "umtx lock", &lock_class_mtx_spin },
	{ "rm_spinlock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "turnstile lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#endif
#ifdef __powerpc__
	{ "tlb0", &lock_class_mtx_spin },
#endif
	/*
	 * leaf locks
	 */
	{ "intrcnt", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef __i386__
	{ "allpmaps", &lock_class_mtx_spin },
	{ "descriptor tables", &lock_class_mtx_spin },
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "cpuset", &lock_class_mtx_spin },
	{ "mprof lock", &lock_class_mtx_spin },
	{ "zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
#ifdef	HWPMC_HOOKS
	{ "pmc-leaf", &lock_class_mtx_spin },
#endif
	{ "blocked lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};
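
/*
 * witness_initialize() below walks each NULL-terminated run in order_lists
 * and makes every entry the parent of the entry that follows it.  For
 * example, the UDP/IP run establishes the order "udp" -> "udpinp" ->
 * "so_snd"; acquiring "so_snd" before "udp" would then be reported as a
 * reversal.
 */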

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
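
/*
 * For example, fixup_filename("../../../kern/subr_witness.c") returns
 * "kern/subr_witness.c".
 */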

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine is
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	w_data = malloc(sizeof (struct witness) * witness_count, M_WITNESS,
	    M_WAITOK | M_ZERO);

	w_rmatrix = malloc(sizeof(*w_rmatrix) * (witness_count + 1),
	    M_WITNESS, M_WAITOK | M_ZERO);

	for (i = 0; i < witness_count + 1; i++) {
		w_rmatrix[i] = malloc(sizeof(*w_rmatrix[i]) *
		    (witness_count + 1), M_WITNESS, M_WAITOK | M_ZERO);
	}
	badstack_sbuf_size = witness_count * 256;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = witness_count - 1; i >= 0; i--) {
		w = &w_data[i];
		memset(w, 0, sizeof(*w));
		w_data[i].w_index = i;	/* Witness index never changes. */
		witness_free(w);
	}
	KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
	    ("%s: Invalid list of free witness objects", __func__));

	/* Witness with index 0 is not used to aid in debugging. */
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;

	for (i = 0; i < witness_count; i++) {
		memset(w_rmatrix[i], 0, sizeof(*w_rmatrix[i]) *
		    (witness_count + 1));
	}

	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);
	witness_init_hash_tables();

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			itismychild(w, w1);
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
		lock = pending_locks[i].wh_lock;
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(pending_locks[i].wh_type,
		    LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
    NULL);

void
witness_init(struct lock_object *lock, const char *type)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be recursable",
		    __func__, class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be sleepable",
		    __func__, class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		kassert_panic("%s: lock (%s) %s can not be upgradable",
		    __func__, class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch < 1 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		pending_locks[pending_cnt].wh_lock = lock;
		pending_locks[pending_cnt++].wh_type = type;
		if (pending_cnt > WITNESS_PENDLIST)
			panic("%s: pending locks list is too small, "
			    "increase WITNESS_PENDLIST\n",
			    __func__);
	} else
		lock->lo_witness = enroll(type, class);
}
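
/*
 * A hedged usage sketch (hypothetical driver code, not part of this file):
 * initializing a mutex with mtx_init() reaches witness_init() via
 * lock_init(), enrolling a witness for the lock's type/name.
 */
#if 0
static struct mtx example_mtx;

static void
example_attach(void)
{
	/* lock_init() -> witness_init(&example_mtx.lock_object, ...) */
	mtx_init(&example_mtx, "example driver", NULL, MTX_DEF);
}
#endif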

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
		return;
	w = lock->lo_witness;

	mtx_lock_spin(&w_mtx);
	MPASS(w->w_refcount > 0);
	w->w_refcount--;

	if (w->w_refcount == 0)
		depart(w);
	mtx_unlock_spin(&w_mtx);
}

#ifdef DDB
static void
witness_ddb_compute_levels(void)
{
	struct witness *w;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_ddb_level = -1;

	/*
	 * Look for locks with no parents and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {

		/* If the witness has ancestors (is not a root), skip it. */
		if (w->w_num_ancestors > 0)
			continue;
		witness_ddb_level_descendants(w, 0);
	}
}

static void
witness_ddb_level_descendants(struct witness *w, int l)
{
	int i;

	if (w->w_ddb_level >= l)
		return;

	w->w_ddb_level = l;
	l++;

	for (i = 1; i <= w_max_used_index; i++) {
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_level_descendants(&w_data[i], l);
	}
}

static void
witness_ddb_display_descendants(int(*prnt)(const char *fmt, ...),
    struct witness *w, int indent)
{
	int i;

	for (i = 0; i < indent; i++)
		prnt(" ");
	prnt("%s (type: %s, depth: %d, active refs: %d)",
	     w->w_name, w->w_class->lc_name,
	     w->w_ddb_level, w->w_refcount);
	if (w->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	w->w_displayed = 1;
	if (w->w_file != NULL && w->w_line != 0)
		prnt(" -- last acquired @ %s:%d\n", fixup_filename(w->w_file),
		    w->w_line);
	else
		prnt(" -- never acquired\n");
	indent++;
	WITNESS_INDEX_ASSERT(w->w_index);
	for (i = 1; i <= w_max_used_index; i++) {
		if (db_pager_quit)
			return;
		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
			witness_ddb_display_descendants(prnt, &w_data[i],
			    indent);
	}
}

static void
witness_ddb_display_list(int(*prnt)(const char *fmt, ...),
    struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_ddb_level > 0)
			continue;

		/* This lock has no ancestors - display its descendants. */
		witness_ddb_display_descendants(prnt, w, 0);
		if (db_pager_quit)
			return;
	}
}

static void
witness_ddb_display(int(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	witness_ddb_compute_levels();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list)
		w->w_displayed = 0;

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_ddb_display_list(prnt, &w_sleep);
	if (db_pager_quit)
		return;

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_ddb_display_list(prnt, &w_spin);
	if (db_pager_quit)
		return;

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s (type: %s, depth: %d)\n", w->w_name,
		    w->w_class->lc_name, w->w_ddb_level);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == -1 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	mtx_assert(&w_mtx, MA_NOTOWNED);
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (witness_watch &&
	    isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_witness->w_name, lock1->lo_witness->w_name);
	itismychild(lock1->lo_witness, lock2->lo_witness);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line, struct lock_object *interlock)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1, *lock2, *plock;
	struct lock_class *class, *iclass;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;

	if (class->lc_flags & LC_SLEEPLOCK) {

		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			kassert_panic("acquiring blockable sleep lock with "
			    "spinlock or critical section held (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		lock_list = td->td_sleeplocks;
		if (lock_list == NULL || lock_list->ll_count == 0)
			return;
	} else {

		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  Avoid problems with thread
		 * migration pinning the thread while checking if
		 * spinlocks are held.  If at least one spinlock is held,
		 * the thread is on a safe path and may be unpinned.
		 */
		sched_pin();
		lock_list = PCPU_GET(spinlocks);
		if (lock_list == NULL || lock_list->ll_count == 0) {
			sched_unpin();
			return;
		}
		sched_unpin();
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while exclusively locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			kassert_panic("excl->share");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
			printf("while share locked from %s:%d\n",
			    fixup_filename(lock1->li_file), lock1->li_line);
			kassert_panic("share->excl");
		}
		return;
	}

	/* Warn if the interlock is not locked exactly once. */
	if (interlock != NULL) {
		iclass = LOCK_CLASS(interlock);
		lock1 = find_instance(lock_list, interlock);
		if (lock1 == NULL)
			kassert_panic("interlock (%s) %s not locked @ %s:%d",
			    iclass->lc_name, interlock->lo_name,
			    fixup_filename(file), line);
		else if ((lock1->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic("interlock (%s) %s recursed @ %s:%d",
			    iclass->lc_name, interlock->lo_name,
			    fixup_filename(file), line);
	}

	/*
	 * Find the previously acquired lock, but ignore interlocks.
	 */
	plock = &lock_list->ll_children[lock_list->ll_count - 1];
	if (interlock != NULL && plock->li_lock == interlock) {
		if (lock_list->ll_count > 1)
			plock =
			    &lock_list->ll_children[lock_list->ll_count - 2];
		else {
			lle = lock_list->ll_next;

			/*
			 * The interlock is the only lock we hold, so
			 * simply return.
			 */
			if (lle == NULL)
				return;
			plock = &lle->ll_children[lle->ll_count - 1];
		}
	}

	/*
	 * Try to perform most checks without a lock.  If this succeeds we
	 * can skip acquiring the lock and return success.
	 */
	w1 = plock->li_lock->lo_witness;
	if (witness_lock_order_check(w1, w))
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	mtx_lock_spin(&w_mtx);
	witness_lock_order_add(w1, w);
	if (w1 == w) {
		i = w->w_index;
		if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
		    !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
			w_rmatrix[i][i] |= WITNESS_REVERSAL;
			w->w_reversed = 1;
			mtx_unlock_spin(&w_mtx);
			printf(
			    "acquiring duplicate lock of same type: \"%s\"\n",
			    w->w_name);
			printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
			    fixup_filename(plock->li_file), plock->li_line);
			printf(" 2nd %s @ %s:%d\n", lock->lo_name,
			    fixup_filename(file), line);
			witness_debugger(1);
		} else
			mtx_unlock_spin(&w_mtx);
		return;
	}
	mtx_assert(&w_mtx, MA_OWNED);

	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w))
		goto out;

	for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < witness_count);
			lock1 = &lle->ll_children[i];

			/*
			 * Ignore the interlock.
			 */
			if (interlock == lock1->li_lock)
				continue;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			w1 = lock1->li_lock->lo_witness;
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}

			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.lock_object)
				continue;

			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;

			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.lock_object)
				goto reversal;

			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:

			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
#ifdef BLESSING

			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				goto out;
#endif

			/* Bail if this violation is known */
			if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
				goto out;

			/* Record this as a violation */
			w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
			w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
			w->w_reversed = w1->w_reversed = 1;
			witness_increment_graph_generation();
			mtx_unlock_spin(&w_mtx);

#ifdef WITNESS_NO_VNODE
			/*
			 * There are known LORs between VNODE locks. They are
			 * not an indication of a bug. VNODE locks are flagged
			 * as such (LO_IS_VNODE) and we don't yell if the LOR
			 * is between 2 VNODE locks.
			 */
			if ((lock->lo_flags & LO_IS_VNODE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_IS_VNODE) != 0)
				return;
#endif

			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.lock_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");

			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_witness->w_name,
				    fixup_filename(lock2->li_file),
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    w1->w_name, fixup_filename(lock1->li_file),
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, w->w_name,
				    fixup_filename(file), line);
			}
			witness_debugger(1);
			return;
		}
	}

	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(plock->li_lock == &Giant.lock_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    w->w_name, plock->li_lock->lo_witness->w_name);
		itismychild(plock->li_lock->lo_witness, w);
	}
out:
	mtx_unlock_spin(&w_mtx);
}
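
/*
 * Pieced together from the printf formats above, a two-lock report looks
 * roughly like this (addresses, names, and file:line values illustrative):
 *
 *	lock order reversal:
 *	 1st 0xc1234560 vnode interlock (vnode interlock) @ kern/vfs_subr.c:2100
 *	 2nd 0xc1234720 system map (vm map (system)) @ vm/vm_map.c:3050
 */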

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			kassert_panic(
			    "upgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			kassert_panic(
			    "upgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL) {
		kassert_panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
		return;
	}
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) != 0)
			kassert_panic(
			    "upgrade of exclusive lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic(
			    "upgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (witness_watch) {
		if ((lock->lo_flags & LO_UPGRADABLE) == 0)
			kassert_panic(
			    "downgrade of non-upgradable lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((class->lc_flags & LC_SLEEPLOCK) == 0)
			kassert_panic(
			    "downgrade of non-sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
	}
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL) {
		kassert_panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name,
		    fixup_filename(file), line);
		return;
	}
	if (witness_watch) {
		if ((instance->li_flags & LI_EXCLUSIVE) == 0)
			kassert_panic(
			    "downgrade of shared lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name,
			    fixup_filename(file), line);
		if ((instance->li_flags & LI_RECURSEMASK) != 0)
			kassert_panic(
			    "downgrade of recursed lock (%s) %s r=%d @ %s:%d",
			    class->lc_name, lock->lo_name,
			    instance->li_flags & LI_RECURSEMASK,
			    fixup_filename(file), line);
	}
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	lle = *lock_list;
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}

	/*
	 * When disabling WITNESS through witness_watch we could end up with
	 * registered locks still left in the td_sleeplocks queue.  We have
	 * to make sure we flush these queues, so just search for any such
	 * leftover locks and remove them.
	 */
	if (witness_watch > 0) {
		kassert_panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		return;
	} else {
		return;
	}
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while exclusively locked from %s:%d\n",
		    fixup_filename(instance->li_file), instance->li_line);
		kassert_panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		printf("while share locked from %s:%d\n",
		    fixup_filename(instance->li_file),
		    instance->li_line);
		kassert_panic("share->uexcl");
	}
	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}
	/* The lock is now being dropped, check for NORELEASE flag */
	if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
		printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, fixup_filename(file), line);
		kassert_panic("lock marked norelease");
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/*
	 * To reduce contention on w_mtx, we always want to keep a head
	 * object in the lists so that frequent allocation from the free
	 * witness pool (and the subsequent locking) is avoided.  To keep
	 * the code simple, a completely emptied head object also implies
	 * that there are no further objects in the list, so list ownership
	 * needs to be handed over to another object if the current head
	 * needs to be freed.
	 */
	if ((*lock_list)->ll_count == 0) {
		if (*lock_list == lle) {
			if (lle->ll_next == NULL)
				return;
		} else
			lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

void
witness_thread_exit(struct thread *td)
{
	struct lock_list_entry *lle;
	int i, n;

	lle = td->td_sleeplocks;
	if (lle == NULL || panicstr != NULL)
		return;
	if (lle->ll_count != 0) {
		for (n = 0; lle != NULL; lle = lle->ll_next)
			for (i = lle->ll_count - 1; i >= 0; i--) {
				if (n == 0)
		printf("Thread %p exiting with the following locks held:\n",
					    td);
				n++;
				witness_list_lock(&lle->ll_children[i], printf);
			}
		kassert_panic(
		    "Thread %p cannot exit while holding sleeplocks\n", td);
	}
	witness_lock_list_free(lle);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lock_list, *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch < 1 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.lock_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1, printf);
		}

1715	/*
1716	 * Pin the thread in order to avoid problems with thread migration.
1717	 * Once all the checks of spin lock ownership have passed, the
1718	 * thread is on a safe path and it can be unpinned.
1719	 */
1720	sched_pin();
1721	lock_list = PCPU_GET(spinlocks);
1722	if (lock_list != NULL && lock_list->ll_count != 0) {
1723		sched_unpin();
1724
1725		/*
1726		 * Since none of the WARN_* flags can exempt a lock of
1727		 * this (spin) class, the only acceptable case is a
1728		 * single held spin lock, so check whether the first
1729		 * spin lock is the one curthread should hold.
1730		 */
1731		lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1732		if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1733		    lock1->li_lock == lock && n == 0)
1734			return (0);
1735
1736		va_start(ap, fmt);
1737		vprintf(fmt, ap);
1738		va_end(ap);
1739		printf(" with the following");
1740		if (flags & WARN_SLEEPOK)
1741			printf(" non-sleepable");
1742		printf(" locks held:\n");
1743		n += witness_list_locks(&lock_list, printf);
1744	} else
1745		sched_unpin();
1746	if (flags & WARN_PANIC && n)
1747		kassert_panic("%s", __func__);
1748	else
1749		witness_debugger(n);
1750	return (n);
1751}
1752
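/*
 * Example usage (an illustrative sketch only; example_wait() and its
 * argument are hypothetical): code that is about to sleep typically
 * asserts, via the WITNESS_WARN() wrapper from <sys/lock.h> that ends
 * up in witness_warn() above, that only Giant and sleepable locks are
 * still held.
 */
#if 0
static void
example_wait(void *chan)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "%s: sleeping on %p", __func__, chan);
	tsleep(chan, PWAIT, "exwait", hz);
}
#endif
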
1753const char *
1754witness_file(struct lock_object *lock)
1755{
1756	struct witness *w;
1757
1758	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1759		return ("?");
1760	w = lock->lo_witness;
1761	return (w->w_file);
1762}
1763
1764int
1765witness_line(struct lock_object *lock)
1766{
1767	struct witness *w;
1768
1769	if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1770		return (0);
1771	w = lock->lo_witness;
1772	return (w->w_line);
1773}
1774
1775static struct witness *
1776enroll(const char *description, struct lock_class *lock_class)
1777{
1778	struct witness *w;
1779	struct witness_list *typelist;
1780
1781	MPASS(description != NULL);
1782
1783	if (witness_watch == -1 || panicstr != NULL)
1784		return (NULL);
1785	if ((lock_class->lc_flags & LC_SPINLOCK)) {
1786		if (witness_skipspin)
1787			return (NULL);
1788		else
1789			typelist = &w_spin;
1790	} else if ((lock_class->lc_flags & LC_SLEEPLOCK)) {
1791		typelist = &w_sleep;
1792	} else {
1793		kassert_panic("lock class %s is not sleep or spin",
1794		    lock_class->lc_name);
1795		return (NULL);
1796	}
1797
1798	mtx_lock_spin(&w_mtx);
1799	w = witness_hash_get(description);
1800	if (w)
1801		goto found;
1802	if ((w = witness_get()) == NULL)
1803		return (NULL);
1804	MPASS(strlen(description) < MAX_W_NAME);
1805	strcpy(w->w_name, description);
1806	w->w_class = lock_class;
1807	w->w_refcount = 1;
1808	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1809	if (lock_class->lc_flags & LC_SPINLOCK) {
1810		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1811		w_spin_cnt++;
1812	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1813		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1814		w_sleep_cnt++;
1815	}
1816
1817	/* Insert new witness into the hash */
1818	witness_hash_put(w);
1819	witness_increment_graph_generation();
1820	mtx_unlock_spin(&w_mtx);
1821	return (w);
1822found:
1823	w->w_refcount++;
1824	mtx_unlock_spin(&w_mtx);
1825	if (lock_class != w->w_class)
1826		kassert_panic(
1827			"lock (%s) %s does not match earlier (%s) lock",
1828			description, lock_class->lc_name,
1829			w->w_class->lc_name);
1830	return (w);
1831}
1832
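/*
 * Note that witnesses are keyed purely by name: two locks initialized
 * with the same name share a single witness (the w_refcount bump in
 * the found: path above) and thus a single spot in the order graph,
 * which is also why enroll() panics when a name is reused with a
 * different lock class.
 */
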
1833static void
1834depart(struct witness *w)
1835{
1836	struct witness_list *list;
1837
1838	MPASS(w->w_refcount == 0);
1839	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1840		list = &w_sleep;
1841		w_sleep_cnt--;
1842	} else {
1843		list = &w_spin;
1844		w_spin_cnt--;
1845	}
1846	/*
1847	 * Set file to NULL as it may point into a loadable module.
1848	 */
1849	w->w_file = NULL;
1850	w->w_line = 0;
1851	witness_increment_graph_generation();
1852}
1853
1854
1855static void
1856adopt(struct witness *parent, struct witness *child)
1857{
1858	int pi, ci, i, j;
1859
1860	if (witness_cold == 0)
1861		mtx_assert(&w_mtx, MA_OWNED);
1862
1863	/* If the relationship is already known, there's no work to be done. */
1864	if (isitmychild(parent, child))
1865		return;
1866
1867	/* When the structure of the graph changes, bump up the generation. */
1868	witness_increment_graph_generation();
1869
1870	/*
1871	 * The hard part ... create the direct relationship, then propagate all
1872	 * indirect relationships.
1873	 */
1874	pi = parent->w_index;
1875	ci = child->w_index;
1876	WITNESS_INDEX_ASSERT(pi);
1877	WITNESS_INDEX_ASSERT(ci);
1878	MPASS(pi != ci);
1879	w_rmatrix[pi][ci] |= WITNESS_PARENT;
1880	w_rmatrix[ci][pi] |= WITNESS_CHILD;
1881
1882	/*
1883	 * If parent was not already an ancestor of child,
1884	 * then we increment the descendant and ancestor counters.
1885	 */
1886	if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1887		parent->w_num_descendants++;
1888		child->w_num_ancestors++;
1889	}
1890
1891	/*
1892	 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1893	 * an ancestor of 'pi' during this loop.
1894	 */
1895	for (i = 1; i <= w_max_used_index; i++) {
1896		if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1897		    (i != pi))
1898			continue;
1899
1900		/* Find each descendant of 'i' and mark it as a descendant. */
1901		for (j = 1; j <= w_max_used_index; j++) {
1902
1903			/*
1904			 * Skip children that are already marked as
1905			 * descendants of 'i'.
1906			 */
1907			if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1908				continue;
1909
1910			/*
1911			 * We are only interested in descendants of 'ci'. Note
1912			 * that 'ci' itself is counted as a descendant of 'ci'.
1913			 */
1914			if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1915			    (j != ci))
1916				continue;
1917			w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1918			w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1919			w_data[i].w_num_descendants++;
1920			w_data[j].w_num_ancestors++;
1921
1922			/*
1923			 * Make sure we aren't marking a node as both an
1924			 * ancestor and descendant. We should have caught
1925			 * this as a lock order reversal earlier.
1926			 */
1927			if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1928			    (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1929				printf("witness rmatrix paradox! [%d][%d]=%d "
1930				    "both ancestor and descendant\n",
1931				    i, j, w_rmatrix[i][j]);
1932				kdb_backtrace();
1933				printf("Witness disabled.\n");
1934				witness_watch = -1;
1935			}
1936			if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1937			    (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1938				printf("witness rmatrix paradox! [%d][%d]=%d "
1939				    "both ancestor and descendant\n",
1940				    j, i, w_rmatrix[j][i]);
1941				kdb_backtrace();
1942				printf("Witness disabled.\n");
1943				witness_watch = -1;
1944			}
1945		}
1946	}
1947}
1948
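/*
 * Worked example (hypothetical witnesses "a", "b" and "c"): if "a" is
 * already known as an ancestor of "b" and adopt(b, c) is called, the
 * direct pass sets WITNESS_PARENT in w_rmatrix[b][c] and WITNESS_CHILD
 * in w_rmatrix[c][b], and the nested loops then also record "a" as an
 * ancestor of "c" (and "c" as a descendant of "a"), so later checks
 * between "a" and "c" are a single matrix lookup, not a graph walk.
 */
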
1949static void
1950itismychild(struct witness *parent, struct witness *child)
1951{
1952	int unlocked;
1953
1954	MPASS(child != NULL && parent != NULL);
1955	if (witness_cold == 0)
1956		mtx_assert(&w_mtx, MA_OWNED);
1957
1958	if (!witness_lock_type_equal(parent, child)) {
1959		if (witness_cold == 0) {
1960			unlocked = 1;
1961			mtx_unlock_spin(&w_mtx);
1962		} else {
1963			unlocked = 0;
1964		}
1965		kassert_panic(
1966		    "%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1967		    "the same lock type", __func__, parent->w_name,
1968		    parent->w_class->lc_name, child->w_name,
1969		    child->w_class->lc_name);
1970		if (unlocked)
1971			mtx_lock_spin(&w_mtx);
1972	}
1973	adopt(parent, child);
1974}
1975
1976/*
1977 * Generic code for the isitmy*() functions. The rmask parameter is the
1978 * expected relationship of w1 to w2.
1979 */
1980static int
1981_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1982{
1983	unsigned char r1, r2;
1984	int i1, i2;
1985
1986	i1 = w1->w_index;
1987	i2 = w2->w_index;
1988	WITNESS_INDEX_ASSERT(i1);
1989	WITNESS_INDEX_ASSERT(i2);
1990	r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1991	r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1992
1993	/* The flags on one better be the inverse of the flags on the other */
1994	if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1995		(WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1996		printf("%s: rmatrix mismatch between %s (index %d) and %s "
1997		    "(index %d): w_rmatrix[%d][%d] == %hhx but "
1998		    "w_rmatrix[%d][%d] == %hhx\n",
1999		    fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
2000		    i2, i1, r2);
2001		kdb_backtrace();
2002		printf("Witness disabled.\n");
2003		witness_watch = -1;
2004	}
2005	return (r1 & rmask);
2006}
2007
2008/*
2009 * Checks if @child is a direct child of @parent.
2010 */
2011static int
2012isitmychild(struct witness *parent, struct witness *child)
2013{
2014
2015	return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
2016}
2017
2018/*
2019 * Checks if @descendant is a direct or indirect descendant of @ancestor.
2020 */
2021static int
2022isitmydescendant(struct witness *ancestor, struct witness *descendant)
2023{
2024
2025	return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
2026	    __func__));
2027}
2028
2029#ifdef BLESSING
2030static int
2031blessed(struct witness *w1, struct witness *w2)
2032{
2033	int i;
2034	struct witness_blessed *b;
2035
2036	for (i = 0; i < blessed_count; i++) {
2037		b = &blessed_list[i];
2038		if (strcmp(w1->w_name, b->b_lock1) == 0) {
2039			if (strcmp(w2->w_name, b->b_lock2) == 0)
2040				return (1);
2041			continue;
2042		}
2043		if (strcmp(w1->w_name, b->b_lock2) == 0)
2044			if (strcmp(w2->w_name, b->b_lock1) == 0)
2045				return (1);
2046	}
2047	return (0);
2048}
2049#endif
2050
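/*
 * A hypothetical example of a blessed pair (the names below are made
 * up): an entry such as
 *
 *	{ "example lock A", "example lock B" }
 *
 * in the static blessed_list earlier in this file would make blessed()
 * suppress reversal reports between those two witnesses in either
 * acquisition order.
 */
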
2051static struct witness *
2052witness_get(void)
2053{
2054	struct witness *w;
2055	int index;
2056
2057	if (witness_cold == 0)
2058		mtx_assert(&w_mtx, MA_OWNED);
2059
2060	if (witness_watch == -1) {
2061		mtx_unlock_spin(&w_mtx);
2062		return (NULL);
2063	}
2064	if (STAILQ_EMPTY(&w_free)) {
2065		witness_watch = -1;
2066		mtx_unlock_spin(&w_mtx);
2067		printf("WITNESS: unable to allocate a new witness object\n");
2068		return (NULL);
2069	}
2070	w = STAILQ_FIRST(&w_free);
2071	STAILQ_REMOVE_HEAD(&w_free, w_list);
2072	w_free_cnt--;
2073	index = w->w_index;
2074	MPASS(index > 0 && index == w_max_used_index + 1 &&
2075	    index < witness_count);
2076	bzero(w, sizeof(*w));
2077	w->w_index = index;
2078	if (index > w_max_used_index)
2079		w_max_used_index = index;
2080	return (w);
2081}
2082
2083static void
2084witness_free(struct witness *w)
2085{
2086
2087	STAILQ_INSERT_HEAD(&w_free, w, w_list);
2088	w_free_cnt++;
2089}
2090
2091static struct lock_list_entry *
2092witness_lock_list_get(void)
2093{
2094	struct lock_list_entry *lle;
2095
2096	if (witness_watch == -1)
2097		return (NULL);
2098	mtx_lock_spin(&w_mtx);
2099	lle = w_lock_list_free;
2100	if (lle == NULL) {
2101		witness_watch = -1;
2102		mtx_unlock_spin(&w_mtx);
2103		printf("%s: witness exhausted\n", __func__);
2104		return (NULL);
2105	}
2106	w_lock_list_free = lle->ll_next;
2107	mtx_unlock_spin(&w_mtx);
2108	bzero(lle, sizeof(*lle));
2109	return (lle);
2110}
2111
2112static void
2113witness_lock_list_free(struct lock_list_entry *lle)
2114{
2115
2116	mtx_lock_spin(&w_mtx);
2117	lle->ll_next = w_lock_list_free;
2118	w_lock_list_free = lle;
2119	mtx_unlock_spin(&w_mtx);
2120}
2121
2122static struct lock_instance *
2123find_instance(struct lock_list_entry *list, const struct lock_object *lock)
2124{
2125	struct lock_list_entry *lle;
2126	struct lock_instance *instance;
2127	int i;
2128
2129	for (lle = list; lle != NULL; lle = lle->ll_next)
2130		for (i = lle->ll_count - 1; i >= 0; i--) {
2131			instance = &lle->ll_children[i];
2132			if (instance->li_lock == lock)
2133				return (instance);
2134		}
2135	return (NULL);
2136}
2137
2138static void
2139witness_list_lock(struct lock_instance *instance,
2140    int (*prnt)(const char *fmt, ...))
2141{
2142	struct lock_object *lock;
2143
2144	lock = instance->li_lock;
2145	prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2146	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2147	if (lock->lo_witness->w_name != lock->lo_name)
2148		prnt(" (%s)", lock->lo_witness->w_name);
2149	prnt(" r = %d (%p) locked @ %s:%d\n",
2150	    instance->li_flags & LI_RECURSEMASK, lock,
2151	    fixup_filename(instance->li_file), instance->li_line);
2152}
2153
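/*
 * For example (the address and path below are hypothetical), a held
 * Giant would be reported by the function above as:
 *
 *	exclusive sleep mutex Giant (Giant) r = 0 (0xffffffff80e0e6e0)
 *	    locked @ /usr/src/sys/kern/kern_module.c:120
 *
 * printed as a single line of output.
 */
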
2154#ifdef DDB
2155static int
2156witness_thread_has_locks(struct thread *td)
2157{
2158
2159	if (td->td_sleeplocks == NULL)
2160		return (0);
2161	return (td->td_sleeplocks->ll_count != 0);
2162}
2163
2164static int
2165witness_proc_has_locks(struct proc *p)
2166{
2167	struct thread *td;
2168
2169	FOREACH_THREAD_IN_PROC(p, td) {
2170		if (witness_thread_has_locks(td))
2171			return (1);
2172	}
2173	return (0);
2174}
2175#endif
2176
2177int
2178witness_list_locks(struct lock_list_entry **lock_list,
2179    int (*prnt)(const char *fmt, ...))
2180{
2181	struct lock_list_entry *lle;
2182	int i, nheld;
2183
2184	nheld = 0;
2185	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2186		for (i = lle->ll_count - 1; i >= 0; i--) {
2187			witness_list_lock(&lle->ll_children[i], prnt);
2188			nheld++;
2189		}
2190	return (nheld);
2191}
2192
2193/*
2194 * This is a bit risky at best.  We call this function when we have timed
2195 * out acquiring a spin lock, and we assume that the other CPU is stuck
2196 * with this lock held.  So, we go groveling around in the other CPU's
2197 * per-cpu data to try to find the lock instance for this spin lock to
2198 * see when it was last acquired.
2199 */
2200void
2201witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2202    int (*prnt)(const char *fmt, ...))
2203{
2204	struct lock_instance *instance;
2205	struct pcpu *pc;
2206
2207	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2208		return;
2209	pc = pcpu_find(owner->td_oncpu);
2210	instance = find_instance(pc->pc_spinlocks, lock);
2211	if (instance != NULL)
2212		witness_list_lock(instance, prnt);
2213}
2214
2215void
2216witness_save(struct lock_object *lock, const char **filep, int *linep)
2217{
2218	struct lock_list_entry *lock_list;
2219	struct lock_instance *instance;
2220	struct lock_class *class;
2221
2222	/*
2223	 * This function is used independently in locking code to deal with
2224	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2225	 * Giant is gone.
2226	 */
2227	if (SCHEDULER_STOPPED())
2228		return;
2229	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2230	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2231		return;
2232	class = LOCK_CLASS(lock);
2233	if (class->lc_flags & LC_SLEEPLOCK)
2234		lock_list = curthread->td_sleeplocks;
2235	else {
2236		if (witness_skipspin)
2237			return;
2238		lock_list = PCPU_GET(spinlocks);
2239	}
2240	instance = find_instance(lock_list, lock);
2241	if (instance == NULL) {
2242		kassert_panic("%s: lock (%s) %s not locked", __func__,
2243		    class->lc_name, lock->lo_name);
2244		return;
2245	}
2246	*filep = instance->li_file;
2247	*linep = instance->li_line;
2248}
2249
2250void
2251witness_restore(struct lock_object *lock, const char *file, int line)
2252{
2253	struct lock_list_entry *lock_list;
2254	struct lock_instance *instance;
2255	struct lock_class *class;
2256
2257	/*
2258	 * This function is used independently in locking code to deal with
2259	 * Giant; the SCHEDULER_STOPPED() check can be removed here once
2260	 * Giant is gone.
2261	 */
2262	if (SCHEDULER_STOPPED())
2263		return;
2264	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2265	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2266		return;
2267	class = LOCK_CLASS(lock);
2268	if (class->lc_flags & LC_SLEEPLOCK)
2269		lock_list = curthread->td_sleeplocks;
2270	else {
2271		if (witness_skipspin)
2272			return;
2273		lock_list = PCPU_GET(spinlocks);
2274	}
2275	instance = find_instance(lock_list, lock);
2276	if (instance == NULL)
2277		kassert_panic("%s: lock (%s) %s not locked", __func__,
2278		    class->lc_name, lock->lo_name);
2279	lock->lo_witness->w_file = file;
2280	lock->lo_witness->w_line = line;
2281	if (instance == NULL)
2282		return;
2283	instance->li_file = file;
2284	instance->li_line = line;
2285}
2286
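/*
 * Illustrative sketch (not compiled; example_drop_giant_and_work() is
 * hypothetical): witness_save() and witness_restore() are normally
 * reached through the WITNESS_SAVE() and WITNESS_RESTORE() macros, as
 * DROP_GIANT()/PICKUP_GIANT() do, to keep the recorded file and line
 * of a held lock accurate across a temporary release:
 */
#if 0
static void
example_drop_giant_and_work(void)
{
	WITNESS_SAVE_DECL(Giant);

	WITNESS_SAVE(&Giant.lock_object, Giant);
	mtx_unlock(&Giant);
	/* ... run without Giant ... */
	mtx_lock(&Giant);
	WITNESS_RESTORE(&Giant.lock_object, Giant);
}
#endif
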
2287void
2288witness_assert(const struct lock_object *lock, int flags, const char *file,
2289    int line)
2290{
2291#ifdef INVARIANT_SUPPORT
2292	struct lock_instance *instance;
2293	struct lock_class *class;
2294
2295	if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2296		return;
2297	class = LOCK_CLASS(lock);
2298	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2299		instance = find_instance(curthread->td_sleeplocks, lock);
2300	else if ((class->lc_flags & LC_SPINLOCK) != 0)
2301		instance = find_instance(PCPU_GET(spinlocks), lock);
2302	else {
2303		kassert_panic("Lock (%s) %s is not sleep or spin!",
2304		    class->lc_name, lock->lo_name);
2305		return;
2306	}
2307	switch (flags) {
2308	case LA_UNLOCKED:
2309		if (instance != NULL)
2310			kassert_panic("Lock (%s) %s locked @ %s:%d.",
2311			    class->lc_name, lock->lo_name,
2312			    fixup_filename(file), line);
2313		break;
2314	case LA_LOCKED:
2315	case LA_LOCKED | LA_RECURSED:
2316	case LA_LOCKED | LA_NOTRECURSED:
2317	case LA_SLOCKED:
2318	case LA_SLOCKED | LA_RECURSED:
2319	case LA_SLOCKED | LA_NOTRECURSED:
2320	case LA_XLOCKED:
2321	case LA_XLOCKED | LA_RECURSED:
2322	case LA_XLOCKED | LA_NOTRECURSED:
2323		if (instance == NULL) {
2324			kassert_panic("Lock (%s) %s not locked @ %s:%d.",
2325			    class->lc_name, lock->lo_name,
2326			    fixup_filename(file), line);
2327			break;
2328		}
2329		if ((flags & LA_XLOCKED) != 0 &&
2330		    (instance->li_flags & LI_EXCLUSIVE) == 0)
2331			kassert_panic(
2332			    "Lock (%s) %s not exclusively locked @ %s:%d.",
2333			    class->lc_name, lock->lo_name,
2334			    fixup_filename(file), line);
2335		if ((flags & LA_SLOCKED) != 0 &&
2336		    (instance->li_flags & LI_EXCLUSIVE) != 0)
2337			kassert_panic(
2338			    "Lock (%s) %s exclusively locked @ %s:%d.",
2339			    class->lc_name, lock->lo_name,
2340			    fixup_filename(file), line);
2341		if ((flags & LA_RECURSED) != 0 &&
2342		    (instance->li_flags & LI_RECURSEMASK) == 0)
2343			kassert_panic("Lock (%s) %s not recursed @ %s:%d.",
2344			    class->lc_name, lock->lo_name,
2345			    fixup_filename(file), line);
2346		if ((flags & LA_NOTRECURSED) != 0 &&
2347		    (instance->li_flags & LI_RECURSEMASK) != 0)
2348			kassert_panic("Lock (%s) %s recursed @ %s:%d.",
2349			    class->lc_name, lock->lo_name,
2350			    fixup_filename(file), line);
2351		break;
2352	default:
2353		kassert_panic("Invalid lock assertion at %s:%d.",
2354		    fixup_filename(file), line);
2355
2356	}
2357#endif	/* INVARIANT_SUPPORT */
2358}
2359
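/*
 * Illustrative sketch (hypothetical helper): lock implementations that
 * do not record shared owners themselves can delegate an "is locked"
 * assertion to the function above, much as rw_assert() does for its
 * RA_LOCKED checks when WITNESS is compiled in:
 */
#if 0
static void
example_assert_locked(const struct lock_object *lock, const char *file,
    int line)
{

	witness_assert(lock, LA_LOCKED, file, line);
}
#endif
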
2360static void
2361witness_setflag(struct lock_object *lock, int flag, int set)
2362{
2363	struct lock_list_entry *lock_list;
2364	struct lock_instance *instance;
2365	struct lock_class *class;
2366
2367	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2368		return;
2369	class = LOCK_CLASS(lock);
2370	if (class->lc_flags & LC_SLEEPLOCK)
2371		lock_list = curthread->td_sleeplocks;
2372	else {
2373		if (witness_skipspin)
2374			return;
2375		lock_list = PCPU_GET(spinlocks);
2376	}
2377	instance = find_instance(lock_list, lock);
2378	if (instance == NULL) {
2379		kassert_panic("%s: lock (%s) %s not locked", __func__,
2380		    class->lc_name, lock->lo_name);
2381		return;
2382	}
2383
2384	if (set)
2385		instance->li_flags |= flag;
2386	else
2387		instance->li_flags &= ~flag;
2388}
2389
2390void
2391witness_norelease(struct lock_object *lock)
2392{
2393
2394	witness_setflag(lock, LI_NORELEASE, 1);
2395}
2396
2397void
2398witness_releaseok(struct lock_object *lock)
2399{
2400
2401	witness_setflag(lock, LI_NORELEASE, 0);
2402}
2403
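/*
 * Illustrative sketch (hypothetical function): these hooks, normally
 * used via the WITNESS_NORELEASE() and WITNESS_RELEASEOK() macros,
 * bracket a region in which dropping the lock triggers the
 * "lock marked norelease" panic in the unlock path above:
 */
#if 0
static void
example_no_release_region(struct lock_object *lock)
{

	witness_norelease(lock);
	/* ... code that must not release the lock ... */
	witness_releaseok(lock);
}
#endif
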
2404#ifdef DDB
2405static void
2406witness_ddb_list(struct thread *td)
2407{
2408
2409	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2410	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2411
2412	if (witness_watch < 1)
2413		return;
2414
2415	witness_list_locks(&td->td_sleeplocks, db_printf);
2416
2417	/*
2418	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2419	 * if td is currently executing on some other CPU and holds spin locks
2420	 * as we won't display those locks.  If we had an MI way of getting
2421	 * the per-cpu data for a given cpu then we could use
2422	 * td->td_oncpu to get the list of spinlocks for this thread
2423	 * and "fix" this.
2424	 *
2425	 * That still wouldn't really fix this unless we locked the scheduler
2426	 * lock or stopped the other CPU to make sure it wasn't changing the
2427	 * list out from under us.  It is probably best to just not try to
2428	 * handle threads on other CPUs for now.
2429	 */
2430	if (td == curthread && PCPU_GET(spinlocks) != NULL)
2431		witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2432}
2433
2434DB_SHOW_COMMAND(locks, db_witness_list)
2435{
2436	struct thread *td;
2437
2438	if (have_addr)
2439		td = db_lookup_thread(addr, TRUE);
2440	else
2441		td = kdb_thread;
2442	witness_ddb_list(td);
2443}
2444
2445DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2446{
2447	struct thread *td;
2448	struct proc *p;
2449
2450	/*
2451	 * It would be nice to list only threads and processes that actually
2452	 * hold sleep locks, but that information is currently not exported
2453	 * by WITNESS.
2454	 */
2455	FOREACH_PROC_IN_SYSTEM(p) {
2456		if (!witness_proc_has_locks(p))
2457			continue;
2458		FOREACH_THREAD_IN_PROC(p, td) {
2459			if (!witness_thread_has_locks(td))
2460				continue;
2461			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2462			    p->p_comm, td, td->td_tid);
2463			witness_ddb_list(td);
2464			if (db_pager_quit)
2465				return;
2466		}
2467	}
2468}
2469DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2470
2471DB_SHOW_COMMAND(witness, db_witness_display)
2472{
2473
2474	witness_ddb_display(db_printf);
2475}
2476#endif
2477
2478static int
2479sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2480{
2481	struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2482	struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2483	struct sbuf *sb;
2484	u_int w_rmatrix1, w_rmatrix2;
2485	int error, generation, i, j;
2486
2487	tmp_data1 = NULL;
2488	tmp_data2 = NULL;
2489	tmp_w1 = NULL;
2490	tmp_w2 = NULL;
2491	if (witness_watch < 1) {
2492		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2493		return (error);
2494	}
2495	if (witness_cold) {
2496		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2497		return (error);
2498	}
2499	error = 0;
2500	sb = sbuf_new(NULL, NULL, badstack_sbuf_size, SBUF_AUTOEXTEND);
2501	if (sb == NULL)
2502		return (ENOMEM);
2503
2504	/* Allocate and init temporary storage space. */
2505	tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2506	tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2507	tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2508	    M_WAITOK | M_ZERO);
2509	tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2510	    M_WAITOK | M_ZERO);
2511	stack_zero(&tmp_data1->wlod_stack);
2512	stack_zero(&tmp_data2->wlod_stack);
2513
2514restart:
2515	mtx_lock_spin(&w_mtx);
2516	generation = w_generation;
2517	mtx_unlock_spin(&w_mtx);
2518	sbuf_printf(sb, "Number of known direct relationships is %d\n",
2519	    w_lohash.wloh_count);
2520	for (i = 1; i <= w_max_used_index; i++) {
2521		mtx_lock_spin(&w_mtx);
2522		if (generation != w_generation) {
2523			mtx_unlock_spin(&w_mtx);
2524
2525			/* The graph has changed, try again. */
2526			req->oldidx = 0;
2527			sbuf_clear(sb);
2528			goto restart;
2529		}
2530
2531		w1 = &w_data[i];
2532		if (w1->w_reversed == 0) {
2533			mtx_unlock_spin(&w_mtx);
2534			continue;
2535		}
2536
2537		/* Copy w1 locally so we can release the spin lock. */
2538		*tmp_w1 = *w1;
2539		mtx_unlock_spin(&w_mtx);
2540
2541		if (tmp_w1->w_reversed == 0)
2542			continue;
2543		for (j = 1; j <= w_max_used_index; j++) {
2544			if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2545				continue;
2546
2547			mtx_lock_spin(&w_mtx);
2548			if (generation != w_generation) {
2549				mtx_unlock_spin(&w_mtx);
2550
2551				/* The graph has changed, try again. */
2552				req->oldidx = 0;
2553				sbuf_clear(sb);
2554				goto restart;
2555			}
2556
2557			w2 = &w_data[j];
2558			data1 = witness_lock_order_get(w1, w2);
2559			data2 = witness_lock_order_get(w2, w1);
2560
2561			/*
2562			 * Copy information locally so we can release the
2563			 * spin lock.
2564			 */
2565			*tmp_w2 = *w2;
2566			w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2567			w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2568
2569			if (data1) {
2570				stack_zero(&tmp_data1->wlod_stack);
2571				stack_copy(&data1->wlod_stack,
2572				    &tmp_data1->wlod_stack);
2573			}
2574			if (data2 && data2 != data1) {
2575				stack_zero(&tmp_data2->wlod_stack);
2576				stack_copy(&data2->wlod_stack,
2577				    &tmp_data2->wlod_stack);
2578			}
2579			mtx_unlock_spin(&w_mtx);
2580
2581			sbuf_printf(sb,
2582	    "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2583			    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2584			    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2585#if 0
2586 			sbuf_printf(sb,
2587			"w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2588 			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2589 			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2590#endif
2591			if (data1) {
2592				sbuf_printf(sb,
2593			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2594				    tmp_w1->w_name, tmp_w1->w_class->lc_name,
2595				    tmp_w2->w_name, tmp_w2->w_class->lc_name);
2596				stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2597				sbuf_printf(sb, "\n");
2598			}
2599			if (data2 && data2 != data1) {
2600				sbuf_printf(sb,
2601			"Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2602				    tmp_w2->w_name, tmp_w2->w_class->lc_name,
2603				    tmp_w1->w_name, tmp_w1->w_class->lc_name);
2604				stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2605				sbuf_printf(sb, "\n");
2606			}
2607		}
2608	}
2609	mtx_lock_spin(&w_mtx);
2610	if (generation != w_generation) {
2611		mtx_unlock_spin(&w_mtx);
2612
2613		/*
2614		 * The graph changed while we were printing stack data,
2615		 * try again.
2616		 */
2617		req->oldidx = 0;
2618		sbuf_clear(sb);
2619		goto restart;
2620	}
2621	mtx_unlock_spin(&w_mtx);
2622
2623	/* Free temporary storage space. */
2624	free(tmp_data1, M_TEMP);
2625	free(tmp_data2, M_TEMP);
2626	free(tmp_w1, M_TEMP);
2627	free(tmp_w2, M_TEMP);
2628
2629	sbuf_finish(sb);
2630	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2631	sbuf_delete(sb);
2632
2633	return (error);
2634}
2635
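/*
 * For example, the reversals recorded so far can be inspected from
 * userland with:
 *
 *	# sysctl debug.witness.badstacks
 */
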
2636static int
2637sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2638{
2639	struct witness *w;
2640	struct sbuf *sb;
2641	int error;
2642
2643	if (witness_watch < 1) {
2644		error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2645		return (error);
2646	}
2647	if (witness_cold) {
2648		error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2649		return (error);
2650	}
2651	error = 0;
2652
2653	error = sysctl_wire_old_buffer(req, 0);
2654	if (error != 0)
2655		return (error);
2656	sb = sbuf_new_for_sysctl(NULL, NULL, FULLGRAPH_SBUF_SIZE, req);
2657	if (sb == NULL)
2658		return (ENOMEM);
2659	sbuf_printf(sb, "\n");
2660
2661	mtx_lock_spin(&w_mtx);
2662	STAILQ_FOREACH(w, &w_all, w_list)
2663		w->w_displayed = 0;
2664	STAILQ_FOREACH(w, &w_all, w_list)
2665		witness_add_fullgraph(sb, w);
2666	mtx_unlock_spin(&w_mtx);
2667
2668	/*
2669	 * Close the sbuf and return to userland.
2670	 */
2671	error = sbuf_finish(sb);
2672	sbuf_delete(sb);
2673
2674	return (error);
2675}
2676
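/*
 * For example:
 *
 *	# sysctl debug.witness.fullgraph
 *
 * emits one "parent","child" line per known direct relationship, a
 * format that is easy to post-process into a graph.
 */
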
2677static int
2678sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2679{
2680	int error, value;
2681
2682	value = witness_watch;
2683	error = sysctl_handle_int(oidp, &value, 0, req);
2684	if (error != 0 || req->newptr == NULL)
2685		return (error);
2686	if (value > 1 || value < -1 ||
2687	    (witness_watch == -1 && value != witness_watch))
2688		return (EINVAL);
2689	witness_watch = value;
2690	return (0);
2691}
2692
2693static void
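/*
 * For example, WITNESS can be disabled at run time from userland with:
 *
 *	# sysctl debug.witness.watch=-1
 *
 * Note that once it has been turned off with -1 it cannot be
 * re-enabled, which is exactly what the handler above rejects.
 */
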
2694witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2695{
2696	int i;
2697
2698	if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2699		return;
2700	w->w_displayed = 1;
2701
2702	WITNESS_INDEX_ASSERT(w->w_index);
2703	for (i = 1; i <= w_max_used_index; i++) {
2704		if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2705			sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2706			    w_data[i].w_name);
2707			witness_add_fullgraph(sb, &w_data[i]);
2708		}
2709	}
2710}
2711
2712/*
2713 * A simple hash function. Takes a key pointer and a key size. If size == 0,
2714 * interprets the key as a string and reads until the null
2715 * terminator. Otherwise, reads the first size bytes. Returns an unsigned 32-bit
2716 * hash value computed from the key.
2717 */
2718static uint32_t
2719witness_hash_djb2(const uint8_t *key, uint32_t size)
2720{
2721	unsigned int hash = 5381;
2722	int i;
2723
2724	/* hash = hash * 33 + key[i] */
2725	if (size)
2726		for (i = 0; i < size; i++)
2727			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2728	else
2729		for (i = 0; key[i] != 0; i++)
2730			hash = ((hash << 5) + hash) + (unsigned int)key[i];
2731
2732	return (hash);
2733}
2734
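/*
 * Worked example: for the string "ab",
 * hash = (5381 * 33 + 'a') * 33 + 'b' = 177670 * 33 + 98 = 5863208.
 */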
2735
2736/*
2737 * Initializes the two witness hash tables. Called exactly once from
2738 * witness_initialize().
2739 */
2740static void
2741witness_init_hash_tables(void)
2742{
2743	int i;
2744
2745	MPASS(witness_cold);
2746
2747	/* Initialize the hash tables. */
2748	for (i = 0; i < WITNESS_HASH_SIZE; i++)
2749		w_hash.wh_array[i] = NULL;
2750
2751	w_hash.wh_size = WITNESS_HASH_SIZE;
2752	w_hash.wh_count = 0;
2753
2754	/* Initialize the lock order data hash. */
2755	w_lofree = NULL;
2756	for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2757		memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2758		w_lodata[i].wlod_next = w_lofree;
2759		w_lofree = &w_lodata[i];
2760	}
2761	w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2762	w_lohash.wloh_count = 0;
2763	for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2764		w_lohash.wloh_array[i] = NULL;
2765}
2766
2767static struct witness *
2768witness_hash_get(const char *key)
2769{
2770	struct witness *w;
2771	uint32_t hash;
2772
2773	MPASS(key != NULL);
2774	if (witness_cold == 0)
2775		mtx_assert(&w_mtx, MA_OWNED);
2776	hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2777	w = w_hash.wh_array[hash];
2778	while (w != NULL) {
2779		if (strcmp(w->w_name, key) == 0)
2780			goto out;
2781		w = w->w_hash_next;
2782	}
2783
2784out:
2785	return (w);
2786}
2787
2788static void
2789witness_hash_put(struct witness *w)
2790{
2791	uint32_t hash;
2792
2793	MPASS(w != NULL);
2794	MPASS(w->w_name != NULL);
2795	if (witness_cold == 0)
2796		mtx_assert(&w_mtx, MA_OWNED);
2797	KASSERT(witness_hash_get(w->w_name) == NULL,
2798	    ("%s: trying to add a hash entry that already exists!", __func__));
2799	KASSERT(w->w_hash_next == NULL,
2800	    ("%s: w->w_hash_next != NULL", __func__));
2801
2802	hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2803	w->w_hash_next = w_hash.wh_array[hash];
2804	w_hash.wh_array[hash] = w;
2805	w_hash.wh_count++;
2806}
2807
2808
2809static struct witness_lock_order_data *
2810witness_lock_order_get(struct witness *parent, struct witness *child)
2811{
2812	struct witness_lock_order_data *data = NULL;
2813	struct witness_lock_order_key key;
2814	unsigned int hash;
2815
2816	MPASS(parent != NULL && child != NULL);
2817	key.from = parent->w_index;
2818	key.to = child->w_index;
2819	WITNESS_INDEX_ASSERT(key.from);
2820	WITNESS_INDEX_ASSERT(key.to);
2821	if ((w_rmatrix[parent->w_index][child->w_index]
2822	    & WITNESS_LOCK_ORDER_KNOWN) == 0)
2823		goto out;
2824
2825	hash = witness_hash_djb2((const char*)&key,
2826	    sizeof(key)) % w_lohash.wloh_size;
2827	data = w_lohash.wloh_array[hash];
2828	while (data != NULL) {
2829		if (witness_lock_order_key_equal(&data->wlod_key, &key))
2830			break;
2831		data = data->wlod_next;
2832	}
2833
2834out:
2835	return (data);
2836}
2837
2838/*
2839 * Verify that parent and child have a known relationship, are not the same,
2840 * and child is actually a child of parent.  This is done without w_mtx
2841 * to avoid contention in the common case.
2842 */
2843static int
2844witness_lock_order_check(struct witness *parent, struct witness *child)
2845{
2846
2847	if (parent != child &&
2848	    w_rmatrix[parent->w_index][child->w_index]
2849	    & WITNESS_LOCK_ORDER_KNOWN &&
2850	    isitmychild(parent, child))
2851		return (1);
2852
2853	return (0);
2854}
2855
2856static int
2857witness_lock_order_add(struct witness *parent, struct witness *child)
2858{
2859	struct witness_lock_order_data *data = NULL;
2860	struct witness_lock_order_key key;
2861	unsigned int hash;
2862
2863	MPASS(parent != NULL && child != NULL);
2864	key.from = parent->w_index;
2865	key.to = child->w_index;
2866	WITNESS_INDEX_ASSERT(key.from);
2867	WITNESS_INDEX_ASSERT(key.to);
2868	if (w_rmatrix[parent->w_index][child->w_index]
2869	    & WITNESS_LOCK_ORDER_KNOWN)
2870		return (1);
2871
2872	hash = witness_hash_djb2((const char*)&key,
2873	    sizeof(key)) % w_lohash.wloh_size;
2874	w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2875	data = w_lofree;
2876	if (data == NULL)
2877		return (0);
2878	w_lofree = data->wlod_next;
2879	data->wlod_next = w_lohash.wloh_array[hash];
2880	data->wlod_key = key;
2881	w_lohash.wloh_array[hash] = data;
2882	w_lohash.wloh_count++;
2883	stack_zero(&data->wlod_stack);
2884	stack_save(&data->wlod_stack);
2885	return (1);
2886}
2887
2888/* Call this whenever the structure of the witness graph changes. */
2889static void
2890witness_increment_graph_generation(void)
2891{
2892
2893	if (witness_cold == 0)
2894		mtx_assert(&w_mtx, MA_OWNED);
2895	w_generation++;
2896}
2897
2898#ifdef KDB
2899static void
2900_witness_debugger(int cond, const char *msg)
2901{
2902
2903	if (witness_trace && cond)
2904		kdb_backtrace();
2905	if (witness_kdb && cond)
2906		kdb_enter(KDB_WHY_WITNESS, msg);
2907}
2908#endif
2909