subr_witness.c revision 177299
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 *    promote products derived from this software without specific prior
14 *    written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 */
31
32/*
33 * Implementation of the `witness' lock verifier.  Originally implemented for
34 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
35 * classes in FreeBSD.
36 */
37
38/*
39 *	Main Entry: witness
40 *	Pronunciation: 'wit-n&s
41 *	Function: noun
42 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
43 *	    testimony, witness, from 2wit
44 *	Date: before 12th century
45 *	1 : attestation of a fact or event : TESTIMONY
46 *	2 : one that gives evidence; specifically : one who testifies in
47 *	    a cause or before a judicial tribunal
48 *	3 : one asked to be present at a transaction so as to be able to
49 *	    testify to its having taken place
50 *	4 : one who has personal knowledge of something
51 *	5 a : something serving as evidence or proof : SIGN
52 *	  b : public affirmation by word or example of usually
53 *	      religious faith or conviction <the heroic witness to divine
54 *	      life -- Pilot>
55 *	6 capitalized : a member of the Jehovah's Witnesses
56 */
57
58/*
59 * Special rules concerning Giant and lock orders:
60 *
61 * 1) Giant must be acquired before any other mutexes.  Stated another way,
62 *    no other mutex may be held when Giant is acquired.
63 *
64 * 2) Giant must be released when blocking on a sleepable lock.
65 *
66 * This rule is less obvious, but is a result of Giant providing the same
67 * semantics as spl().  Basically, when a thread sleeps, it must release
68 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
69 * 2).
70 *
71 * 3) Giant may be acquired before or after sleepable locks.
72 *
73 * This rule is also not quite as obvious.  Giant may be acquired after
74 * a sleepable lock because it is a non-sleepable lock and non-sleepable
75 * locks may always be acquired while holding a sleepable lock.  The second
76 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
77 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
78 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
79 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
80 * execute.  Thus, acquiring Giant both before and after a sleepable lock
81 * will not result in a lock order reversal.
82 */
83
84#include <sys/cdefs.h>
85__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 177299 2008-03-17 11:48:40Z pjd $");
86
87#include "opt_ddb.h"
88#include "opt_hwpmc_hooks.h"
89#include "opt_witness.h"
90
91#include <sys/param.h>
92#include <sys/bus.h>
93#include <sys/kdb.h>
94#include <sys/kernel.h>
95#include <sys/ktr.h>
96#include <sys/lock.h>
97#include <sys/malloc.h>
98#include <sys/mutex.h>
99#include <sys/priv.h>
100#include <sys/proc.h>
101#include <sys/sysctl.h>
102#include <sys/systm.h>
103
104#include <ddb/ddb.h>
105
106#include <machine/stdarg.h>
107
108/* Note that these traces do not work with KTR_ALQ. */
109#if 0
110#define	KTR_WITNESS	KTR_SUBSYS
111#else
112#define	KTR_WITNESS	0
113#endif
114
115/* Easier to stay with the old names. */
116#define	lo_list		lo_witness_data.lod_list
117#define	lo_witness	lo_witness_data.lod_witness
118
119/* Define this to check for blessed mutexes */
120#undef BLESSING
121
122#define WITNESS_COUNT 1024
123#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
124/*
125 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
126 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
127 * probably be safe for the most part, but it's still a SWAG.
128 */
129#define LOCK_CHILDCOUNT (MAXCPU + 1024) * 2
130
131#define	WITNESS_NCHILDREN 6
132
133struct witness_child_list_entry;
134
135struct witness {
136	const	char *w_name;
137	struct	lock_class *w_class;
138	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
139	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
140	struct	witness_child_list_entry *w_children;	/* Great evilness... */
141	const	char *w_file;
142	int	w_line;
143	u_int	w_level;
144	u_int	w_refcount;
145	u_char	w_Giant_squawked:1;
146	u_char	w_other_squawked:1;
147	u_char	w_same_squawked:1;
148	u_char	w_displayed:1;
149};
150
151struct witness_child_list_entry {
152	struct	witness_child_list_entry *wcl_next;
153	struct	witness *wcl_children[WITNESS_NCHILDREN];
154	u_int	wcl_count;
155};
156
157STAILQ_HEAD(witness_list, witness);
158
159#ifdef BLESSING
160struct witness_blessed {
161	const	char *b_lock1;
162	const	char *b_lock2;
163};
164#endif
165
166struct witness_order_list_entry {
167	const	char *w_name;
168	struct	lock_class *w_class;
169};
170
171#ifdef BLESSING
172static int	blessed(struct witness *, struct witness *);
173#endif
174static int	depart(struct witness *w);
175static struct	witness *enroll(const char *description,
176				struct lock_class *lock_class);
177static int	insertchild(struct witness *parent, struct witness *child);
178static int	isitmychild(struct witness *parent, struct witness *child);
179static int	isitmydescendant(struct witness *parent, struct witness *child);
180static int	itismychild(struct witness *parent, struct witness *child);
181static void	removechild(struct witness *parent, struct witness *child);
182static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
183static const char *fixup_filename(const char *file);
184static struct	witness *witness_get(void);
185static void	witness_free(struct witness *m);
186static struct	witness_child_list_entry *witness_child_get(void);
187static void	witness_child_free(struct witness_child_list_entry *wcl);
188static struct	lock_list_entry *witness_lock_list_get(void);
189static void	witness_lock_list_free(struct lock_list_entry *lle);
190static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
191					     struct lock_object *lock);
192static void	witness_list_lock(struct lock_instance *instance);
193#ifdef DDB
194static void	witness_leveldescendents(struct witness *parent, int level);
195static void	witness_levelall(void);
196static void	witness_displaydescendants(void(*)(const char *fmt, ...),
197					   struct witness *, int indent);
198static void	witness_display_list(void(*prnt)(const char *fmt, ...),
199				     struct witness_list *list);
200static void	witness_display(void(*)(const char *fmt, ...));
201static void	witness_list(struct thread *td);
202#endif
203
204SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");
205
206/*
207 * If set to 0, witness is disabled.  If set to a non-zero value, witness
208 * performs full lock order checking for all locks.  At runtime, this
209 * value may be set to 0 to turn off witness.  witness is not allowed be
210 * turned on once it is turned off, however.
211 */
212static int witness_watch = 1;
213TUNABLE_INT("debug.witness.watch", &witness_watch);
214SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
215    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
216
217#ifdef KDB
218/*
219 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
220 * to drop into kdebug() when:
221 *	- a lock hierarchy violation occurs
222 *	- locks are held when going to sleep.
223 */
224#ifdef WITNESS_KDB
225int	witness_kdb = 1;
226#else
227int	witness_kdb = 0;
228#endif
229TUNABLE_INT("debug.witness.kdb", &witness_kdb);
230SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
231
232/*
233 * When KDB is enabled and witness_trace is set to 1, it will cause the system
234 * to print a stack trace:
235 *	- a lock hierarchy violation occurs
236 *	- locks are held when going to sleep.
237 */
238int	witness_trace = 1;
239TUNABLE_INT("debug.witness.trace", &witness_trace);
240SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
241#endif /* KDB */
242
243#ifdef WITNESS_SKIPSPIN
244int	witness_skipspin = 1;
245#else
246int	witness_skipspin = 0;
247#endif
248TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
249SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
250    &witness_skipspin, 0, "");
251
252static struct mtx w_mtx;
253static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
254static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
255static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
256static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
257static struct witness_child_list_entry *w_child_free = NULL;
258static struct lock_list_entry *w_lock_list_free = NULL;
259
260static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
261SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
262SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
263SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
264    "");
265SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
266    &w_child_free_cnt, 0, "");
267SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
268    "");
269
270static struct witness w_data[WITNESS_COUNT];
271static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
272static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
273
274static struct witness_order_list_entry order_lists[] = {
275	/*
276	 * sx locks
277	 */
278	{ "proctree", &lock_class_sx },
279	{ "allproc", &lock_class_sx },
280	{ "allprison", &lock_class_sx },
281	{ NULL, NULL },
282	/*
283	 * Various mutexes
284	 */
285	{ "Giant", &lock_class_mtx_sleep },
286	{ "pipe mutex", &lock_class_mtx_sleep },
287	{ "sigio lock", &lock_class_mtx_sleep },
288	{ "process group", &lock_class_mtx_sleep },
289	{ "process lock", &lock_class_mtx_sleep },
290	{ "session", &lock_class_mtx_sleep },
291	{ "uidinfo hash", &lock_class_rw },
292#ifdef	HWPMC_HOOKS
293	{ "pmc-sleep", &lock_class_mtx_sleep },
294#endif
295	{ NULL, NULL },
296	/*
297	 * Sockets
298	 */
299	{ "accept", &lock_class_mtx_sleep },
300	{ "so_snd", &lock_class_mtx_sleep },
301	{ "so_rcv", &lock_class_mtx_sleep },
302	{ "sellck", &lock_class_mtx_sleep },
303	{ NULL, NULL },
304	/*
305	 * Routing
306	 */
307	{ "so_rcv", &lock_class_mtx_sleep },
308	{ "radix node head", &lock_class_mtx_sleep },
309	{ "rtentry", &lock_class_mtx_sleep },
310	{ "ifaddr", &lock_class_mtx_sleep },
311	{ NULL, NULL },
312	/*
313	 * Multicast - protocol locks before interface locks, after UDP locks.
314	 */
315	{ "udpinp", &lock_class_mtx_sleep },
316	{ "in_multi_mtx", &lock_class_mtx_sleep },
317	{ "igmp_mtx", &lock_class_mtx_sleep },
318	{ "if_addr_mtx", &lock_class_mtx_sleep },
319	{ NULL, NULL },
320	/*
321	 * UNIX Domain Sockets
322	 */
323	{ "unp", &lock_class_mtx_sleep },
324	{ "so_snd", &lock_class_mtx_sleep },
325	{ NULL, NULL },
326	/*
327	 * UDP/IP
328	 */
329	{ "udp", &lock_class_mtx_sleep },
330	{ "udpinp", &lock_class_mtx_sleep },
331	{ "so_snd", &lock_class_mtx_sleep },
332	{ NULL, NULL },
333	/*
334	 * TCP/IP
335	 */
336	{ "tcp", &lock_class_mtx_sleep },
337	{ "tcpinp", &lock_class_mtx_sleep },
338	{ "so_snd", &lock_class_mtx_sleep },
339	{ NULL, NULL },
340	/*
341	 * SLIP
342	 */
343	{ "slip_mtx", &lock_class_mtx_sleep },
344	{ "slip sc_mtx", &lock_class_mtx_sleep },
345	{ NULL, NULL },
346	/*
347	 * netatalk
348	 */
349	{ "ddp_list_mtx", &lock_class_mtx_sleep },
350	{ "ddp_mtx", &lock_class_mtx_sleep },
351	{ NULL, NULL },
352	/*
353	 * BPF
354	 */
355	{ "bpf global lock", &lock_class_mtx_sleep },
356	{ "bpf interface lock", &lock_class_mtx_sleep },
357	{ "bpf cdev lock", &lock_class_mtx_sleep },
358	{ NULL, NULL },
359	/*
360	 * NFS server
361	 */
362	{ "nfsd_mtx", &lock_class_mtx_sleep },
363	{ "so_snd", &lock_class_mtx_sleep },
364	{ NULL, NULL },
365
366	/*
367	 * IEEE 802.11
368	 */
369	{ "802.11 com lock", &lock_class_mtx_sleep},
370	{ NULL, NULL },
371	/*
372	 * Network drivers
373	 */
374	{ "network driver", &lock_class_mtx_sleep},
375	{ NULL, NULL },
376
377	/*
378	 * Netgraph
379	 */
380	{ "ng_node", &lock_class_mtx_sleep },
381	{ "ng_worklist", &lock_class_mtx_sleep },
382	{ NULL, NULL },
383	/*
384	 * CDEV
385	 */
386	{ "system map", &lock_class_mtx_sleep },
387	{ "vm page queue mutex", &lock_class_mtx_sleep },
388	{ "vnode interlock", &lock_class_mtx_sleep },
389	{ "cdev", &lock_class_mtx_sleep },
390	{ NULL, NULL },
391	/*
392	 * kqueue/VFS interaction
393	 */
394	{ "kqueue", &lock_class_mtx_sleep },
395	{ "struct mount mtx", &lock_class_mtx_sleep },
396	{ "vnode interlock", &lock_class_mtx_sleep },
397	{ NULL, NULL },
398	/*
399	 * spin locks
400	 */
401#ifdef SMP
402	{ "ap boot", &lock_class_mtx_spin },
403#endif
404	{ "rm.mutex_mtx", &lock_class_mtx_spin },
405	{ "sio", &lock_class_mtx_spin },
406	{ "scrlock", &lock_class_mtx_spin },
407#ifdef __i386__
408	{ "cy", &lock_class_mtx_spin },
409#endif
410#ifdef __sparc64__
411	{ "pcib_mtx", &lock_class_mtx_spin },
412	{ "rtc_mtx", &lock_class_mtx_spin },
413#endif
414	{ "scc_hwmtx", &lock_class_mtx_spin },
415	{ "uart_hwmtx", &lock_class_mtx_spin },
416	{ "fast_taskqueue", &lock_class_mtx_spin },
417	{ "intr table", &lock_class_mtx_spin },
418#ifdef	HWPMC_HOOKS
419	{ "pmc-per-proc", &lock_class_mtx_spin },
420#endif
421	{ "process slock", &lock_class_mtx_spin },
422	{ "sleepq chain", &lock_class_mtx_spin },
423	{ "umtx lock", &lock_class_mtx_spin },
424	{ "rm_spinlock", &lock_class_mtx_spin },
425	{ "turnstile chain", &lock_class_mtx_spin },
426	{ "turnstile lock", &lock_class_mtx_spin },
427	{ "sched lock", &lock_class_mtx_spin },
428	{ "td_contested", &lock_class_mtx_spin },
429	{ "callout", &lock_class_mtx_spin },
430	{ "entropy harvest mutex", &lock_class_mtx_spin },
431	{ "syscons video lock", &lock_class_mtx_spin },
432	{ "time lock", &lock_class_mtx_spin },
433#ifdef SMP
434	{ "smp rendezvous", &lock_class_mtx_spin },
435#endif
436#ifdef __powerpc__
437	{ "tlb0", &lock_class_mtx_spin },
438#endif
439	/*
440	 * leaf locks
441	 */
442	{ "icu", &lock_class_mtx_spin },
443#if defined(SMP) && defined(__sparc64__)
444	{ "ipi", &lock_class_mtx_spin },
445#endif
446#ifdef __i386__
447	{ "allpmaps", &lock_class_mtx_spin },
448	{ "descriptor tables", &lock_class_mtx_spin },
449#endif
450	{ "clk", &lock_class_mtx_spin },
451	{ "mprof lock", &lock_class_mtx_spin },
452	{ "zombie lock", &lock_class_mtx_spin },
453	{ "ALD Queue", &lock_class_mtx_spin },
454#ifdef __ia64__
455	{ "MCA spin lock", &lock_class_mtx_spin },
456#endif
457#if defined(__i386__) || defined(__amd64__)
458	{ "pcicfg", &lock_class_mtx_spin },
459	{ "NDIS thread lock", &lock_class_mtx_spin },
460#endif
461	{ "tw_osl_io_lock", &lock_class_mtx_spin },
462	{ "tw_osl_q_lock", &lock_class_mtx_spin },
463	{ "tw_cl_io_lock", &lock_class_mtx_spin },
464	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
465	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
466#ifdef	HWPMC_HOOKS
467	{ "pmc-leaf", &lock_class_mtx_spin },
468#endif
469	{ "blocked lock", &lock_class_mtx_spin },
470	{ NULL, NULL },
471	{ NULL, NULL }
472};
473
474#ifdef BLESSING
475/*
476 * Pairs of locks which have been blessed
477 * Don't complain about order problems with blessed locks
478 */
479static struct witness_blessed blessed_list[] = {
480};
481static int blessed_count =
482	sizeof(blessed_list) / sizeof(struct witness_blessed);
483#endif
484
485/*
486 * List of locks initialized prior to witness being initialized whose
487 * enrollment is currently deferred.
488 */
489STAILQ_HEAD(, lock_object) pending_locks =
490    STAILQ_HEAD_INITIALIZER(pending_locks);
491
492/*
493 * This global is set to 0 once it becomes safe to use the witness code.
494 */
495static int witness_cold = 1;
496
497/*
498 * This global is set to 1 once the static lock orders have been enrolled
499 * so that a warning can be issued for any spin locks enrolled later.
500 */
501static int witness_spin_warn = 0;
502
503/*
504 * The WITNESS-enabled diagnostic code.  Note that the witness code does
505 * assume that the early boot is single-threaded at least until after this
506 * routine is completed.
507 */
508static void
509witness_initialize(void *dummy __unused)
510{
511	struct lock_object *lock;
512	struct witness_order_list_entry *order;
513	struct witness *w, *w1;
514	int i;
515
516	/*
517	 * We have to release Giant before initializing its witness
518	 * structure so that WITNESS doesn't get confused.
519	 */
520	mtx_unlock(&Giant);
521	mtx_assert(&Giant, MA_NOTOWNED);
522
523	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
524	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
525	    MTX_NOWITNESS | MTX_NOPROFILE);
526	for (i = 0; i < WITNESS_COUNT; i++)
527		witness_free(&w_data[i]);
528	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
529		witness_child_free(&w_childdata[i]);
530	for (i = 0; i < LOCK_CHILDCOUNT; i++)
531		witness_lock_list_free(&w_locklistdata[i]);
532
533	/* First add in all the specified order lists. */
534	for (order = order_lists; order->w_name != NULL; order++) {
535		w = enroll(order->w_name, order->w_class);
536		if (w == NULL)
537			continue;
538		w->w_file = "order list";
539		for (order++; order->w_name != NULL; order++) {
540			w1 = enroll(order->w_name, order->w_class);
541			if (w1 == NULL)
542				continue;
543			w1->w_file = "order list";
544			if (!itismychild(w, w1))
545				panic("Not enough memory for static orders!");
546			w = w1;
547		}
548	}
549	witness_spin_warn = 1;
550
551	/* Iterate through all locks and add them to witness. */
552	while (!STAILQ_EMPTY(&pending_locks)) {
553		lock = STAILQ_FIRST(&pending_locks);
554		STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
555		KASSERT(lock->lo_flags & LO_WITNESS,
556		    ("%s: lock %s is on pending list but not LO_WITNESS",
557		    __func__, lock->lo_name));
558		lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
559	}
560
561	/* Mark the witness code as being ready for use. */
562	witness_cold = 0;
563
564	mtx_lock(&Giant);
565}
566SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
567    NULL);
568
569static int
570sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
571{
572	int error, value;
573
574	value = witness_watch;
575	error = sysctl_handle_int(oidp, &value, 0, req);
576	if (error != 0 || req->newptr == NULL)
577		return (error);
578	if (value == witness_watch)
579		return (0);
580	if (value != 0)
581		return (EINVAL);
582	witness_watch = 0;
583	return (0);
584}
585
586void
587witness_init(struct lock_object *lock)
588{
589	struct lock_class *class;
590
591	/* Various sanity checks. */
592	class = LOCK_CLASS(lock);
593	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
594	    (class->lc_flags & LC_RECURSABLE) == 0)
595		panic("%s: lock (%s) %s can not be recursable", __func__,
596		    class->lc_name, lock->lo_name);
597	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
598	    (class->lc_flags & LC_SLEEPABLE) == 0)
599		panic("%s: lock (%s) %s can not be sleepable", __func__,
600		    class->lc_name, lock->lo_name);
601	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
602	    (class->lc_flags & LC_UPGRADABLE) == 0)
603		panic("%s: lock (%s) %s can not be upgradable", __func__,
604		    class->lc_name, lock->lo_name);
605
606	/*
607	 * If we shouldn't watch this lock, then just clear lo_witness.
608	 * Otherwise, if witness_cold is set, then it is too early to
609	 * enroll this lock, so defer it to witness_initialize() by adding
610	 * it to the pending_locks list.  If it is not too early, then enroll
611	 * the lock now.
612	 */
613	if (witness_watch == 0 || panicstr != NULL ||
614	    (lock->lo_flags & LO_WITNESS) == 0)
615		lock->lo_witness = NULL;
616	else if (witness_cold) {
617		STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
618		lock->lo_flags |= LO_ENROLLPEND;
619	} else
620		lock->lo_witness = enroll(lock->lo_type, class);
621}
622
623void
624witness_destroy(struct lock_object *lock)
625{
626	struct lock_class *class;
627	struct witness *w;
628
629	class = LOCK_CLASS(lock);
630	if (witness_cold)
631		panic("lock (%s) %s destroyed while witness_cold",
632		    class->lc_name, lock->lo_name);
633
634	/* XXX: need to verify that no one holds the lock */
635	if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
636	    lock->lo_witness != NULL) {
637		w = lock->lo_witness;
638		mtx_lock_spin(&w_mtx);
639		MPASS(w->w_refcount > 0);
640		w->w_refcount--;
641
642		/*
643		 * Lock is already released if we have an allocation failure
644		 * and depart() fails.
645		 */
646		if (w->w_refcount != 0 || depart(w))
647			mtx_unlock_spin(&w_mtx);
648	}
649
650	/*
651	 * If this lock is destroyed before witness is up and running,
652	 * remove it from the pending list.
653	 */
654	if (lock->lo_flags & LO_ENROLLPEND) {
655		STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
656		lock->lo_flags &= ~LO_ENROLLPEND;
657	}
658}
659
660#ifdef DDB
661static void
662witness_levelall (void)
663{
664	struct witness_list *list;
665	struct witness *w, *w1;
666
667	/*
668	 * First clear all levels.
669	 */
670	STAILQ_FOREACH(w, &w_all, w_list) {
671		w->w_level = 0;
672	}
673
674	/*
675	 * Look for locks with no parent and level all their descendants.
676	 */
677	STAILQ_FOREACH(w, &w_all, w_list) {
678		/*
679		 * This is just an optimization, technically we could get
680		 * away just walking the all list each time.
681		 */
682		if (w->w_class->lc_flags & LC_SLEEPLOCK)
683			list = &w_sleep;
684		else
685			list = &w_spin;
686		STAILQ_FOREACH(w1, list, w_typelist) {
687			if (isitmychild(w1, w))
688				goto skip;
689		}
690		witness_leveldescendents(w, 0);
691	skip:
692		;	/* silence GCC 3.x */
693	}
694}
695
696static void
697witness_leveldescendents(struct witness *parent, int level)
698{
699	struct witness_child_list_entry *wcl;
700	int i;
701
702	if (parent->w_level < level)
703		parent->w_level = level;
704	level++;
705	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
706		for (i = 0; i < wcl->wcl_count; i++)
707			witness_leveldescendents(wcl->wcl_children[i], level);
708}
709
710static void
711witness_displaydescendants(void(*prnt)(const char *fmt, ...),
712			   struct witness *parent, int indent)
713{
714	struct witness_child_list_entry *wcl;
715	int i, level;
716
717	level = parent->w_level;
718	prnt("%-2d", level);
719	for (i = 0; i < indent; i++)
720		prnt(" ");
721	if (parent->w_refcount > 0)
722		prnt("%s", parent->w_name);
723	else
724		prnt("(dead)");
725	if (parent->w_displayed) {
726		prnt(" -- (already displayed)\n");
727		return;
728	}
729	parent->w_displayed = 1;
730	if (parent->w_refcount > 0) {
731		if (parent->w_file != NULL)
732			prnt(" -- last acquired @ %s:%d", parent->w_file,
733			    parent->w_line);
734	}
735	prnt("\n");
736	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
737		for (i = 0; i < wcl->wcl_count; i++)
738			    witness_displaydescendants(prnt,
739				wcl->wcl_children[i], indent + 1);
740}
741
742static void
743witness_display_list(void(*prnt)(const char *fmt, ...),
744		     struct witness_list *list)
745{
746	struct witness *w;
747
748	STAILQ_FOREACH(w, list, w_typelist) {
749		if (w->w_file == NULL || w->w_level > 0)
750			continue;
751		/*
752		 * This lock has no anscestors, display its descendants.
753		 */
754		witness_displaydescendants(prnt, w, 0);
755	}
756}
757
758static void
759witness_display(void(*prnt)(const char *fmt, ...))
760{
761	struct witness *w;
762
763	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
764	witness_levelall();
765
766	/* Clear all the displayed flags. */
767	STAILQ_FOREACH(w, &w_all, w_list) {
768		w->w_displayed = 0;
769	}
770
771	/*
772	 * First, handle sleep locks which have been acquired at least
773	 * once.
774	 */
775	prnt("Sleep locks:\n");
776	witness_display_list(prnt, &w_sleep);
777
778	/*
779	 * Now do spin locks which have been acquired at least once.
780	 */
781	prnt("\nSpin locks:\n");
782	witness_display_list(prnt, &w_spin);
783
784	/*
785	 * Finally, any locks which have not been acquired yet.
786	 */
787	prnt("\nLocks which were never acquired:\n");
788	STAILQ_FOREACH(w, &w_all, w_list) {
789		if (w->w_file != NULL || w->w_refcount == 0)
790			continue;
791		prnt("%s\n", w->w_name);
792	}
793}
794#endif /* DDB */
795
796/* Trim useless garbage from filenames. */
797static const char *
798fixup_filename(const char *file)
799{
800
801	if (file == NULL)
802		return (NULL);
803	while (strncmp(file, "../", 3) == 0)
804		file += 3;
805	return (file);
806}
807
808int
809witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
810{
811
812	if (witness_watch == 0 || panicstr != NULL)
813		return (0);
814
815	/* Require locks that witness knows about. */
816	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
817	    lock2->lo_witness == NULL)
818		return (EINVAL);
819
820	MPASS(!mtx_owned(&w_mtx));
821	mtx_lock_spin(&w_mtx);
822
823	/*
824	 * If we already have either an explicit or implied lock order that
825	 * is the other way around, then return an error.
826	 */
827	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
828		mtx_unlock_spin(&w_mtx);
829		return (EDOOFUS);
830	}
831
832	/* Try to add the new order. */
833	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
834	    lock2->lo_type, lock1->lo_type);
835	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
836		return (ENOMEM);
837	mtx_unlock_spin(&w_mtx);
838	return (0);
839}
840
841void
842witness_checkorder(struct lock_object *lock, int flags, const char *file,
843    int line)
844{
845	struct lock_list_entry **lock_list, *lle;
846	struct lock_instance *lock1, *lock2;
847	struct lock_class *class;
848	struct witness *w, *w1;
849	struct thread *td;
850	int i, j;
851
852	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
853	    panicstr != NULL)
854		return;
855
856	/*
857	 * Try locks do not block if they fail to acquire the lock, thus
858	 * there is no danger of deadlocks or of switching while holding a
859	 * spin lock if we acquire a lock via a try operation.  This
860	 * function shouldn't even be called for try locks, so panic if
861	 * that happens.
862	 */
863	if (flags & LOP_TRYLOCK)
864		panic("%s should not be called for try lock operations",
865		    __func__);
866
867	w = lock->lo_witness;
868	class = LOCK_CLASS(lock);
869	td = curthread;
870	file = fixup_filename(file);
871
872	if (class->lc_flags & LC_SLEEPLOCK) {
873		/*
874		 * Since spin locks include a critical section, this check
875		 * implicitly enforces a lock order of all sleep locks before
876		 * all spin locks.
877		 */
878		if (td->td_critnest != 0 && !kdb_active)
879			panic("blockable sleep lock (%s) %s @ %s:%d",
880			    class->lc_name, lock->lo_name, file, line);
881
882		/*
883		 * If this is the first lock acquired then just return as
884		 * no order checking is needed.
885		 */
886		if (td->td_sleeplocks == NULL)
887			return;
888		lock_list = &td->td_sleeplocks;
889	} else {
890		/*
891		 * If this is the first lock, just return as no order
892		 * checking is needed.  We check this in both if clauses
893		 * here as unifying the check would require us to use a
894		 * critical section to ensure we don't migrate while doing
895		 * the check.  Note that if this is not the first lock, we
896		 * are already in a critical section and are safe for the
897		 * rest of the check.
898		 */
899		if (PCPU_GET(spinlocks) == NULL)
900			return;
901		lock_list = PCPU_PTR(spinlocks);
902	}
903
904	/*
905	 * Check to see if we are recursing on a lock we already own.  If
906	 * so, make sure that we don't mismatch exclusive and shared lock
907	 * acquires.
908	 */
909	lock1 = find_instance(*lock_list, lock);
910	if (lock1 != NULL) {
911		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
912		    (flags & LOP_EXCLUSIVE) == 0) {
913			printf("shared lock of (%s) %s @ %s:%d\n",
914			    class->lc_name, lock->lo_name, file, line);
915			printf("while exclusively locked from %s:%d\n",
916			    lock1->li_file, lock1->li_line);
917			panic("share->excl");
918		}
919		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
920		    (flags & LOP_EXCLUSIVE) != 0) {
921			printf("exclusive lock of (%s) %s @ %s:%d\n",
922			    class->lc_name, lock->lo_name, file, line);
923			printf("while share locked from %s:%d\n",
924			    lock1->li_file, lock1->li_line);
925			panic("excl->share");
926		}
927		return;
928	}
929
930	/*
931	 * Try locks do not block if they fail to acquire the lock, thus
932	 * there is no danger of deadlocks or of switching while holding a
933	 * spin lock if we acquire a lock via a try operation.
934	 */
935	if (flags & LOP_TRYLOCK)
936		return;
937
938	/*
939	 * Check for duplicate locks of the same type.  Note that we only
940	 * have to check for this on the last lock we just acquired.  Any
941	 * other cases will be caught as lock order violations.
942	 */
943	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
944	w1 = lock1->li_lock->lo_witness;
945	if (w1 == w) {
946		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
947		    (flags & LOP_DUPOK))
948			return;
949		w->w_same_squawked = 1;
950		printf("acquiring duplicate lock of same type: \"%s\"\n",
951			lock->lo_type);
952		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
953		    lock1->li_file, lock1->li_line);
954		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
955#ifdef KDB
956		goto debugger;
957#else
958		return;
959#endif
960	}
961	MPASS(!mtx_owned(&w_mtx));
962	mtx_lock_spin(&w_mtx);
963	/*
964	 * If we know that the the lock we are acquiring comes after
965	 * the lock we most recently acquired in the lock order tree,
966	 * then there is no need for any further checks.
967	 */
968	if (isitmychild(w1, w)) {
969		mtx_unlock_spin(&w_mtx);
970		return;
971	}
972	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
973		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
974
975			MPASS(j < WITNESS_COUNT);
976			lock1 = &lle->ll_children[i];
977			w1 = lock1->li_lock->lo_witness;
978
979			/*
980			 * If this lock doesn't undergo witness checking,
981			 * then skip it.
982			 */
983			if (w1 == NULL) {
984				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
985				    ("lock missing witness structure"));
986				continue;
987			}
988			/*
989			 * If we are locking Giant and this is a sleepable
990			 * lock, then skip it.
991			 */
992			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
993			    lock == &Giant.lock_object)
994				continue;
995			/*
996			 * If we are locking a sleepable lock and this lock
997			 * is Giant, then skip it.
998			 */
999			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1000			    lock1->li_lock == &Giant.lock_object)
1001				continue;
1002			/*
1003			 * If we are locking a sleepable lock and this lock
1004			 * isn't sleepable, we want to treat it as a lock
1005			 * order violation to enfore a general lock order of
1006			 * sleepable locks before non-sleepable locks.
1007			 */
1008			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1009			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1010				goto reversal;
1011			/*
1012			 * If we are locking Giant and this is a non-sleepable
1013			 * lock, then treat it as a reversal.
1014			 */
1015			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1016			    lock == &Giant.lock_object)
1017				goto reversal;
1018			/*
1019			 * Check the lock order hierarchy for a reveresal.
1020			 */
1021			if (!isitmydescendant(w, w1))
1022				continue;
1023		reversal:
1024			/*
1025			 * We have a lock order violation, check to see if it
1026			 * is allowed or has already been yelled about.
1027			 */
1028			mtx_unlock_spin(&w_mtx);
1029#ifdef BLESSING
1030			/*
1031			 * If the lock order is blessed, just bail.  We don't
1032			 * look for other lock order violations though, which
1033			 * may be a bug.
1034			 */
1035			if (blessed(w, w1))
1036				return;
1037#endif
1038			if (lock1->li_lock == &Giant.lock_object) {
1039				if (w1->w_Giant_squawked)
1040					return;
1041				else
1042					w1->w_Giant_squawked = 1;
1043			} else {
1044				if (w1->w_other_squawked)
1045					return;
1046				else
1047					w1->w_other_squawked = 1;
1048			}
1049			/*
1050			 * Ok, yell about it.
1051			 */
1052			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1053			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1054				printf(
1055		"lock order reversal: (sleepable after non-sleepable)\n");
1056			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1057			    && lock == &Giant.lock_object)
1058				printf(
1059		"lock order reversal: (Giant after non-sleepable)\n");
1060			else
1061				printf("lock order reversal:\n");
1062			/*
1063			 * Try to locate an earlier lock with
1064			 * witness w in our list.
1065			 */
1066			do {
1067				lock2 = &lle->ll_children[i];
1068				MPASS(lock2->li_lock != NULL);
1069				if (lock2->li_lock->lo_witness == w)
1070					break;
1071				if (i == 0 && lle->ll_next != NULL) {
1072					lle = lle->ll_next;
1073					i = lle->ll_count - 1;
1074					MPASS(i >= 0 && i < LOCK_NCHILDREN);
1075				} else
1076					i--;
1077			} while (i >= 0);
1078			if (i < 0) {
1079				printf(" 1st %p %s (%s) @ %s:%d\n",
1080				    lock1->li_lock, lock1->li_lock->lo_name,
1081				    lock1->li_lock->lo_type, lock1->li_file,
1082				    lock1->li_line);
1083				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1084				    lock->lo_name, lock->lo_type, file, line);
1085			} else {
1086				printf(" 1st %p %s (%s) @ %s:%d\n",
1087				    lock2->li_lock, lock2->li_lock->lo_name,
1088				    lock2->li_lock->lo_type, lock2->li_file,
1089				    lock2->li_line);
1090				printf(" 2nd %p %s (%s) @ %s:%d\n",
1091				    lock1->li_lock, lock1->li_lock->lo_name,
1092				    lock1->li_lock->lo_type, lock1->li_file,
1093				    lock1->li_line);
1094				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1095				    lock->lo_name, lock->lo_type, file, line);
1096			}
1097#ifdef KDB
1098			goto debugger;
1099#else
1100			return;
1101#endif
1102		}
1103	}
1104	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
1105	/*
1106	 * If requested, build a new lock order.  However, don't build a new
1107	 * relationship between a sleepable lock and Giant if it is in the
1108	 * wrong direction.  The correct lock order is that sleepable locks
1109	 * always come before Giant.
1110	 */
1111	if (flags & LOP_NEWORDER &&
1112	    !(lock1->li_lock == &Giant.lock_object &&
1113	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1114		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1115		    lock->lo_type, lock1->li_lock->lo_type);
1116		if (!itismychild(lock1->li_lock->lo_witness, w))
1117			/* Witness is dead. */
1118			return;
1119	}
1120	mtx_unlock_spin(&w_mtx);
1121	return;
1122
1123#ifdef KDB
1124debugger:
1125	if (witness_trace)
1126		kdb_backtrace();
1127	if (witness_kdb)
1128		kdb_enter(KDB_WHY_WITNESS, __func__);
1129#endif
1130}
1131
1132void
1133witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1134{
1135	struct lock_list_entry **lock_list, *lle;
1136	struct lock_instance *instance;
1137	struct witness *w;
1138	struct thread *td;
1139
1140	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1141	    panicstr != NULL)
1142		return;
1143	w = lock->lo_witness;
1144	td = curthread;
1145	file = fixup_filename(file);
1146
1147	/* Determine lock list for this lock. */
1148	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1149		lock_list = &td->td_sleeplocks;
1150	else
1151		lock_list = PCPU_PTR(spinlocks);
1152
1153	/* Check to see if we are recursing on a lock we already own. */
1154	instance = find_instance(*lock_list, lock);
1155	if (instance != NULL) {
1156		instance->li_flags++;
1157		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1158		    td->td_proc->p_pid, lock->lo_name,
1159		    instance->li_flags & LI_RECURSEMASK);
1160		instance->li_file = file;
1161		instance->li_line = line;
1162		return;
1163	}
1164
1165	/* Update per-witness last file and line acquire. */
1166	w->w_file = file;
1167	w->w_line = line;
1168
1169	/* Find the next open lock instance in the list and fill it. */
1170	lle = *lock_list;
1171	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1172		lle = witness_lock_list_get();
1173		if (lle == NULL)
1174			return;
1175		lle->ll_next = *lock_list;
1176		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1177		    td->td_proc->p_pid, lle);
1178		*lock_list = lle;
1179	}
1180	instance = &lle->ll_children[lle->ll_count++];
1181	instance->li_lock = lock;
1182	instance->li_line = line;
1183	instance->li_file = file;
1184	if ((flags & LOP_EXCLUSIVE) != 0)
1185		instance->li_flags = LI_EXCLUSIVE;
1186	else
1187		instance->li_flags = 0;
1188	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1189	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1190}
1191
1192void
1193witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1194{
1195	struct lock_instance *instance;
1196	struct lock_class *class;
1197
1198	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1199	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1200		return;
1201	class = LOCK_CLASS(lock);
1202	file = fixup_filename(file);
1203	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1204		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1205		    class->lc_name, lock->lo_name, file, line);
1206	if ((flags & LOP_TRYLOCK) == 0)
1207		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
1208		    lock->lo_name, file, line);
1209	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1210		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1211		    class->lc_name, lock->lo_name, file, line);
1212	instance = find_instance(curthread->td_sleeplocks, lock);
1213	if (instance == NULL)
1214		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1215		    class->lc_name, lock->lo_name, file, line);
1216	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1217		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1218		    class->lc_name, lock->lo_name, file, line);
1219	if ((instance->li_flags & LI_RECURSEMASK) != 0)
1220		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1221		    class->lc_name, lock->lo_name,
1222		    instance->li_flags & LI_RECURSEMASK, file, line);
1223	instance->li_flags |= LI_EXCLUSIVE;
1224}
1225
1226void
1227witness_downgrade(struct lock_object *lock, int flags, const char *file,
1228    int line)
1229{
1230	struct lock_instance *instance;
1231	struct lock_class *class;
1232
1233	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1234	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1235		return;
1236	class = LOCK_CLASS(lock);
1237	file = fixup_filename(file);
1238	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1239		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1240		    class->lc_name, lock->lo_name, file, line);
1241	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1242		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1243		    class->lc_name, lock->lo_name, file, line);
1244	instance = find_instance(curthread->td_sleeplocks, lock);
1245	if (instance == NULL)
1246		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1247		    class->lc_name, lock->lo_name, file, line);
1248	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1249		panic("downgrade of shared lock (%s) %s @ %s:%d",
1250		    class->lc_name, lock->lo_name, file, line);
1251	if ((instance->li_flags & LI_RECURSEMASK) != 0)
1252		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1253		    class->lc_name, lock->lo_name,
1254		    instance->li_flags & LI_RECURSEMASK, file, line);
1255	instance->li_flags &= ~LI_EXCLUSIVE;
1256}
1257
1258void
1259witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1260{
1261	struct lock_list_entry **lock_list, *lle;
1262	struct lock_instance *instance;
1263	struct lock_class *class;
1264	struct thread *td;
1265	register_t s;
1266	int i, j;
1267
1268	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1269	    panicstr != NULL)
1270		return;
1271	td = curthread;
1272	class = LOCK_CLASS(lock);
1273	file = fixup_filename(file);
1274
1275	/* Find lock instance associated with this lock. */
1276	if (class->lc_flags & LC_SLEEPLOCK)
1277		lock_list = &td->td_sleeplocks;
1278	else
1279		lock_list = PCPU_PTR(spinlocks);
1280	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1281		for (i = 0; i < (*lock_list)->ll_count; i++) {
1282			instance = &(*lock_list)->ll_children[i];
1283			if (instance->li_lock == lock)
1284				goto found;
1285		}
1286	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
1287	    file, line);
1288found:
1289
1290	/* First, check for shared/exclusive mismatches. */
1291	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
1292	    (flags & LOP_EXCLUSIVE) == 0) {
1293		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1294		    lock->lo_name, file, line);
1295		printf("while exclusively locked from %s:%d\n",
1296		    instance->li_file, instance->li_line);
1297		panic("excl->ushare");
1298	}
1299	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
1300	    (flags & LOP_EXCLUSIVE) != 0) {
1301		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1302		    lock->lo_name, file, line);
1303		printf("while share locked from %s:%d\n", instance->li_file,
1304		    instance->li_line);
1305		panic("share->uexcl");
1306	}
1307
1308	/* If we are recursed, unrecurse. */
1309	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1310		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1311		    td->td_proc->p_pid, instance->li_lock->lo_name,
1312		    instance->li_flags);
1313		instance->li_flags--;
1314		return;
1315	}
1316
1317	/* Otherwise, remove this item from the list. */
1318	s = intr_disable();
1319	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1320	    td->td_proc->p_pid, instance->li_lock->lo_name,
1321	    (*lock_list)->ll_count - 1);
1322	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1323		(*lock_list)->ll_children[j] =
1324		    (*lock_list)->ll_children[j + 1];
1325	(*lock_list)->ll_count--;
1326	intr_restore(s);
1327
1328	/* If this lock list entry is now empty, free it. */
1329	if ((*lock_list)->ll_count == 0) {
1330		lle = *lock_list;
1331		*lock_list = lle->ll_next;
1332		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1333		    td->td_proc->p_pid, lle);
1334		witness_lock_list_free(lle);
1335	}
1336}
1337
1338/*
1339 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1340 * exempt Giant and sleepable locks from the checks as well.  If any
1341 * non-exempt locks are held, then a supplied message is printed to the
1342 * console along with a list of the offending locks.  If indicated in the
1343 * flags then a failure results in a panic as well.
1344 */
1345int
1346witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1347{
1348	struct lock_list_entry *lle;
1349	struct lock_instance *lock1;
1350	struct thread *td;
1351	va_list ap;
1352	int i, n;
1353
1354	if (witness_cold || witness_watch == 0 || panicstr != NULL)
1355		return (0);
1356	n = 0;
1357	td = curthread;
1358	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1359		for (i = lle->ll_count - 1; i >= 0; i--) {
1360			lock1 = &lle->ll_children[i];
1361			if (lock1->li_lock == lock)
1362				continue;
1363			if (flags & WARN_GIANTOK &&
1364			    lock1->li_lock == &Giant.lock_object)
1365				continue;
1366			if (flags & WARN_SLEEPOK &&
1367			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1368				continue;
1369			if (n == 0) {
1370				va_start(ap, fmt);
1371				vprintf(fmt, ap);
1372				va_end(ap);
1373				printf(" with the following");
1374				if (flags & WARN_SLEEPOK)
1375					printf(" non-sleepable");
1376				printf(" locks held:\n");
1377			}
1378			n++;
1379			witness_list_lock(lock1);
1380		}
1381	if (PCPU_GET(spinlocks) != NULL) {
1382		/*
1383		 * Since we already hold a spinlock preemption is
1384		 * already blocked.
1385		 */
1386		if (n == 0) {
1387			va_start(ap, fmt);
1388			vprintf(fmt, ap);
1389			va_end(ap);
1390			printf(" with the following");
1391			if (flags & WARN_SLEEPOK)
1392				printf(" non-sleepable");
1393			printf(" locks held:\n");
1394		}
1395		n += witness_list_locks(PCPU_PTR(spinlocks));
1396	}
1397	if (flags & WARN_PANIC && n)
1398		panic("witness_warn");
1399#ifdef KDB
1400	else if (witness_kdb && n)
1401		kdb_enter(KDB_WHY_WITNESS, __func__);
1402	else if (witness_trace && n)
1403		kdb_backtrace();
1404#endif
1405	return (n);
1406}
1407
1408const char *
1409witness_file(struct lock_object *lock)
1410{
1411	struct witness *w;
1412
1413	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1414		return ("?");
1415	w = lock->lo_witness;
1416	return (w->w_file);
1417}
1418
1419int
1420witness_line(struct lock_object *lock)
1421{
1422	struct witness *w;
1423
1424	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1425		return (0);
1426	w = lock->lo_witness;
1427	return (w->w_line);
1428}
1429
1430static struct witness *
1431enroll(const char *description, struct lock_class *lock_class)
1432{
1433	struct witness *w;
1434
1435	if (witness_watch == 0 || panicstr != NULL)
1436		return (NULL);
1437	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
1438		return (NULL);
1439	mtx_lock_spin(&w_mtx);
1440	STAILQ_FOREACH(w, &w_all, w_list) {
1441		if (w->w_name == description || (w->w_refcount > 0 &&
1442		    strcmp(description, w->w_name) == 0)) {
1443			w->w_refcount++;
1444			mtx_unlock_spin(&w_mtx);
1445			if (lock_class != w->w_class)
1446				panic(
1447				"lock (%s) %s does not match earlier (%s) lock",
1448				    description, lock_class->lc_name,
1449				    w->w_class->lc_name);
1450			return (w);
1451		}
1452	}
1453	if ((w = witness_get()) == NULL)
1454		goto out;
1455	w->w_name = description;
1456	w->w_class = lock_class;
1457	w->w_refcount = 1;
1458	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1459	if (lock_class->lc_flags & LC_SPINLOCK) {
1460		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1461		w_spin_cnt++;
1462	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1463		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1464		w_sleep_cnt++;
1465	} else {
1466		mtx_unlock_spin(&w_mtx);
1467		panic("lock class %s is not sleep or spin",
1468		    lock_class->lc_name);
1469	}
1470	mtx_unlock_spin(&w_mtx);
1471out:
1472	/*
1473	 * We issue a warning for any spin locks not defined in the static
1474	 * order list as a way to discourage their use (folks should really
1475	 * be using non-spin mutexes most of the time).  However, several
1476	 * 3rd part device drivers use spin locks because that is all they
1477	 * have available on Windows and Linux and they think that normal
1478	 * mutexes are insufficient.
1479	 */
1480	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
1481		printf("WITNESS: spin lock %s not in order list\n",
1482		    description);
1483	return (w);
1484}
1485
1486/* Don't let the door bang you on the way out... */
1487static int
1488depart(struct witness *w)
1489{
1490	struct witness_child_list_entry *wcl, *nwcl;
1491	struct witness_list *list;
1492	struct witness *parent;
1493
1494	MPASS(w->w_refcount == 0);
1495	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1496		list = &w_sleep;
1497		w_sleep_cnt--;
1498	} else {
1499		list = &w_spin;
1500		w_spin_cnt--;
1501	}
1502	/*
1503	 * First, we run through the entire tree looking for any
1504	 * witnesses that the outgoing witness is a child of.  For
1505	 * each parent that we find, we reparent all the direct
1506	 * children of the outgoing witness to its parent.
1507	 */
1508	STAILQ_FOREACH(parent, list, w_typelist) {
1509		if (!isitmychild(parent, w))
1510			continue;
1511		removechild(parent, w);
1512	}
1513
1514	/*
1515	 * Now we go through and free up the child list of the
1516	 * outgoing witness.
1517	 */
1518	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
1519		nwcl = wcl->wcl_next;
1520        	w_child_cnt--;
1521		witness_child_free(wcl);
1522	}
1523
1524	/*
1525	 * Detach from various lists and free.
1526	 */
1527	STAILQ_REMOVE(list, w, witness, w_typelist);
1528	STAILQ_REMOVE(&w_all, w, witness, w_list);
1529	witness_free(w);
1530
1531	return (1);
1532}
1533
1534/*
1535 * Add "child" as a direct child of "parent".  Returns false if
1536 * we fail due to out of memory.
1537 */
1538static int
1539insertchild(struct witness *parent, struct witness *child)
1540{
1541	struct witness_child_list_entry **wcl;
1542
1543	MPASS(child != NULL && parent != NULL);
1544
1545	/*
1546	 * Insert "child" after "parent"
1547	 */
1548	wcl = &parent->w_children;
1549	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
1550		wcl = &(*wcl)->wcl_next;
1551	if (*wcl == NULL) {
1552		*wcl = witness_child_get();
1553		if (*wcl == NULL)
1554			return (0);
1555        	w_child_cnt++;
1556	}
1557	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
1558
1559	return (1);
1560}
1561
1562
1563static int
1564itismychild(struct witness *parent, struct witness *child)
1565{
1566	struct witness_list *list;
1567
1568	MPASS(child != NULL && parent != NULL);
1569	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
1570	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
1571		panic(
1572		"%s: parent (%s) and child (%s) are not the same lock type",
1573		    __func__, parent->w_class->lc_name,
1574		    child->w_class->lc_name);
1575
1576	if (!insertchild(parent, child))
1577		return (0);
1578
1579	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
1580		list = &w_sleep;
1581	else
1582		list = &w_spin;
1583	return (1);
1584}
1585
1586static void
1587removechild(struct witness *parent, struct witness *child)
1588{
1589	struct witness_child_list_entry **wcl, *wcl1;
1590	int i;
1591
1592	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1593		for (i = 0; i < (*wcl)->wcl_count; i++)
1594			if ((*wcl)->wcl_children[i] == child)
1595				goto found;
1596	return;
1597found:
1598	(*wcl)->wcl_count--;
1599	if ((*wcl)->wcl_count > i)
1600		(*wcl)->wcl_children[i] =
1601		    (*wcl)->wcl_children[(*wcl)->wcl_count];
1602	MPASS((*wcl)->wcl_children[i] != NULL);
1603	if ((*wcl)->wcl_count != 0)
1604		return;
1605	wcl1 = *wcl;
1606	*wcl = wcl1->wcl_next;
1607	w_child_cnt--;
1608	witness_child_free(wcl1);
1609}
1610
1611static int
1612isitmychild(struct witness *parent, struct witness *child)
1613{
1614	struct witness_child_list_entry *wcl;
1615	int i;
1616
1617	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1618		for (i = 0; i < wcl->wcl_count; i++) {
1619			if (wcl->wcl_children[i] == child)
1620				return (1);
1621		}
1622	}
1623	return (0);
1624}
1625
1626static int
1627isitmydescendant(struct witness *parent, struct witness *child)
1628{
1629	struct witness_child_list_entry *wcl;
1630	int i, j;
1631
1632	if (isitmychild(parent, child))
1633		return (1);
1634	j = 0;
1635	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1636		MPASS(j < 1000);
1637		for (i = 0; i < wcl->wcl_count; i++) {
1638			if (isitmydescendant(wcl->wcl_children[i], child))
1639				return (1);
1640		}
1641		j++;
1642	}
1643	return (0);
1644}
1645
1646#ifdef BLESSING
1647static int
1648blessed(struct witness *w1, struct witness *w2)
1649{
1650	int i;
1651	struct witness_blessed *b;
1652
1653	for (i = 0; i < blessed_count; i++) {
1654		b = &blessed_list[i];
1655		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1656			if (strcmp(w2->w_name, b->b_lock2) == 0)
1657				return (1);
1658			continue;
1659		}
1660		if (strcmp(w1->w_name, b->b_lock2) == 0)
1661			if (strcmp(w2->w_name, b->b_lock1) == 0)
1662				return (1);
1663	}
1664	return (0);
1665}
1666#endif
1667
1668static struct witness *
1669witness_get(void)
1670{
1671	struct witness *w;
1672
1673	if (witness_watch == 0) {
1674		mtx_unlock_spin(&w_mtx);
1675		return (NULL);
1676	}
1677	if (STAILQ_EMPTY(&w_free)) {
1678		witness_watch = 0;
1679		mtx_unlock_spin(&w_mtx);
1680		printf("%s: witness exhausted\n", __func__);
1681		return (NULL);
1682	}
1683	w = STAILQ_FIRST(&w_free);
1684	STAILQ_REMOVE_HEAD(&w_free, w_list);
1685	w_free_cnt--;
1686	bzero(w, sizeof(*w));
1687	return (w);
1688}
1689
1690static void
1691witness_free(struct witness *w)
1692{
1693
1694	STAILQ_INSERT_HEAD(&w_free, w, w_list);
1695	w_free_cnt++;
1696}
1697
1698static struct witness_child_list_entry *
1699witness_child_get(void)
1700{
1701	struct witness_child_list_entry *wcl;
1702
1703	if (witness_watch == 0) {
1704		mtx_unlock_spin(&w_mtx);
1705		return (NULL);
1706	}
1707	wcl = w_child_free;
1708	if (wcl == NULL) {
1709		witness_watch = 0;
1710		mtx_unlock_spin(&w_mtx);
1711		printf("%s: witness exhausted\n", __func__);
1712		return (NULL);
1713	}
1714	w_child_free = wcl->wcl_next;
1715	w_child_free_cnt--;
1716	bzero(wcl, sizeof(*wcl));
1717	return (wcl);
1718}
1719
1720static void
1721witness_child_free(struct witness_child_list_entry *wcl)
1722{
1723
1724	wcl->wcl_next = w_child_free;
1725	w_child_free = wcl;
1726	w_child_free_cnt++;
1727}
1728
1729static struct lock_list_entry *
1730witness_lock_list_get(void)
1731{
1732	struct lock_list_entry *lle;
1733
1734	if (witness_watch == 0)
1735		return (NULL);
1736	mtx_lock_spin(&w_mtx);
1737	lle = w_lock_list_free;
1738	if (lle == NULL) {
1739		witness_watch = 0;
1740		mtx_unlock_spin(&w_mtx);
1741		printf("%s: witness exhausted\n", __func__);
1742		return (NULL);
1743	}
1744	w_lock_list_free = lle->ll_next;
1745	mtx_unlock_spin(&w_mtx);
1746	bzero(lle, sizeof(*lle));
1747	return (lle);
1748}
1749
1750static void
1751witness_lock_list_free(struct lock_list_entry *lle)
1752{
1753
1754	mtx_lock_spin(&w_mtx);
1755	lle->ll_next = w_lock_list_free;
1756	w_lock_list_free = lle;
1757	mtx_unlock_spin(&w_mtx);
1758}
1759
1760static struct lock_instance *
1761find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1762{
1763	struct lock_list_entry *lle;
1764	struct lock_instance *instance;
1765	int i;
1766
1767	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1768		for (i = lle->ll_count - 1; i >= 0; i--) {
1769			instance = &lle->ll_children[i];
1770			if (instance->li_lock == lock)
1771				return (instance);
1772		}
1773	return (NULL);
1774}
1775
1776static void
1777witness_list_lock(struct lock_instance *instance)
1778{
1779	struct lock_object *lock;
1780
1781	lock = instance->li_lock;
1782	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1783	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
1784	if (lock->lo_type != lock->lo_name)
1785		printf(" (%s)", lock->lo_type);
1786	printf(" r = %d (%p) locked @ %s:%d\n",
1787	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1788	    instance->li_line);
1789}
1790
1791#ifdef DDB
1792static int
1793witness_thread_has_locks(struct thread *td)
1794{
1795
1796	return (td->td_sleeplocks != NULL);
1797}
1798
1799static int
1800witness_proc_has_locks(struct proc *p)
1801{
1802	struct thread *td;
1803
1804	FOREACH_THREAD_IN_PROC(p, td) {
1805		if (witness_thread_has_locks(td))
1806			return (1);
1807	}
1808	return (0);
1809}
1810#endif
1811
1812int
1813witness_list_locks(struct lock_list_entry **lock_list)
1814{
1815	struct lock_list_entry *lle;
1816	int i, nheld;
1817
1818	nheld = 0;
1819	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1820		for (i = lle->ll_count - 1; i >= 0; i--) {
1821			witness_list_lock(&lle->ll_children[i]);
1822			nheld++;
1823		}
1824	return (nheld);
1825}
1826
1827/*
1828 * This is a bit risky at best.  We call this function when we have timed
1829 * out acquiring a spin lock, and we assume that the other CPU is stuck
1830 * with this lock held.  So, we go groveling around in the other CPU's
1831 * per-cpu data to try to find the lock instance for this spin lock to
1832 * see when it was last acquired.
1833 */
1834void
1835witness_display_spinlock(struct lock_object *lock, struct thread *owner)
1836{
1837	struct lock_instance *instance;
1838	struct pcpu *pc;
1839
1840	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
1841		return;
1842	pc = pcpu_find(owner->td_oncpu);
1843	instance = find_instance(pc->pc_spinlocks, lock);
1844	if (instance != NULL)
1845		witness_list_lock(instance);
1846}
1847
1848void
1849witness_save(struct lock_object *lock, const char **filep, int *linep)
1850{
1851	struct lock_list_entry *lock_list;
1852	struct lock_instance *instance;
1853	struct lock_class *class;
1854
1855	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1856	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1857		return;
1858	class = LOCK_CLASS(lock);
1859	if (class->lc_flags & LC_SLEEPLOCK)
1860		lock_list = curthread->td_sleeplocks;
1861	else {
1862		if (witness_skipspin)
1863			return;
1864		lock_list = PCPU_GET(spinlocks);
1865	}
1866	instance = find_instance(lock_list, lock);
1867	if (instance == NULL)
1868		panic("%s: lock (%s) %s not locked", __func__,
1869		    class->lc_name, lock->lo_name);
1870	*filep = instance->li_file;
1871	*linep = instance->li_line;
1872}
1873
1874void
1875witness_restore(struct lock_object *lock, const char *file, int line)
1876{
1877	struct lock_list_entry *lock_list;
1878	struct lock_instance *instance;
1879	struct lock_class *class;
1880
1881	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1882	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1883		return;
1884	class = LOCK_CLASS(lock);
1885	if (class->lc_flags & LC_SLEEPLOCK)
1886		lock_list = curthread->td_sleeplocks;
1887	else {
1888		if (witness_skipspin)
1889			return;
1890		lock_list = PCPU_GET(spinlocks);
1891	}
1892	instance = find_instance(lock_list, lock);
1893	if (instance == NULL)
1894		panic("%s: lock (%s) %s not locked", __func__,
1895		    class->lc_name, lock->lo_name);
1896	lock->lo_witness->w_file = file;
1897	lock->lo_witness->w_line = line;
1898	instance->li_file = file;
1899	instance->li_line = line;
1900}
1901
1902void
1903witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1904{
1905#ifdef INVARIANT_SUPPORT
1906	struct lock_instance *instance;
1907	struct lock_class *class;
1908
1909	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1910		return;
1911	class = LOCK_CLASS(lock);
1912	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
1913		instance = find_instance(curthread->td_sleeplocks, lock);
1914	else if ((class->lc_flags & LC_SPINLOCK) != 0)
1915		instance = find_instance(PCPU_GET(spinlocks), lock);
1916	else {
1917		panic("Lock (%s) %s is not sleep or spin!",
1918		    class->lc_name, lock->lo_name);
1919	}
1920	file = fixup_filename(file);
1921	switch (flags) {
1922	case LA_UNLOCKED:
1923		if (instance != NULL)
1924			panic("Lock (%s) %s locked @ %s:%d.",
1925			    class->lc_name, lock->lo_name, file, line);
1926		break;
1927	case LA_LOCKED:
1928	case LA_LOCKED | LA_RECURSED:
1929	case LA_LOCKED | LA_NOTRECURSED:
1930	case LA_SLOCKED:
1931	case LA_SLOCKED | LA_RECURSED:
1932	case LA_SLOCKED | LA_NOTRECURSED:
1933	case LA_XLOCKED:
1934	case LA_XLOCKED | LA_RECURSED:
1935	case LA_XLOCKED | LA_NOTRECURSED:
1936		if (instance == NULL) {
1937			panic("Lock (%s) %s not locked @ %s:%d.",
1938			    class->lc_name, lock->lo_name, file, line);
1939			break;
1940		}
1941		if ((flags & LA_XLOCKED) != 0 &&
1942		    (instance->li_flags & LI_EXCLUSIVE) == 0)
1943			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1944			    class->lc_name, lock->lo_name, file, line);
1945		if ((flags & LA_SLOCKED) != 0 &&
1946		    (instance->li_flags & LI_EXCLUSIVE) != 0)
1947			panic("Lock (%s) %s exclusively locked @ %s:%d.",
1948			    class->lc_name, lock->lo_name, file, line);
1949		if ((flags & LA_RECURSED) != 0 &&
1950		    (instance->li_flags & LI_RECURSEMASK) == 0)
1951			panic("Lock (%s) %s not recursed @ %s:%d.",
1952			    class->lc_name, lock->lo_name, file, line);
1953		if ((flags & LA_NOTRECURSED) != 0 &&
1954		    (instance->li_flags & LI_RECURSEMASK) != 0)
1955			panic("Lock (%s) %s recursed @ %s:%d.",
1956			    class->lc_name, lock->lo_name, file, line);
1957		break;
1958	default:
1959		panic("Invalid lock assertion at %s:%d.", file, line);
1960
1961	}
1962#endif	/* INVARIANT_SUPPORT */
1963}
1964
1965#ifdef DDB
1966static void
1967witness_list(struct thread *td)
1968{
1969
1970	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1971	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
1972
1973	if (witness_watch == 0)
1974		return;
1975
1976	witness_list_locks(&td->td_sleeplocks);
1977
1978	/*
1979	 * We only handle spinlocks if td == curthread.  This is somewhat broken
1980	 * if td is currently executing on some other CPU and holds spin locks
1981	 * as we won't display those locks.  If we had a MI way of getting
1982	 * the per-cpu data for a given cpu then we could use
1983	 * td->td_oncpu to get the list of spinlocks for this thread
1984	 * and "fix" this.
1985	 *
1986	 * That still wouldn't really fix this unless we locked the scheduler
1987	 * lock or stopped the other CPU to make sure it wasn't changing the
1988	 * list out from under us.  It is probably best to just not try to
1989	 * handle threads on other CPU's for now.
1990	 */
1991	if (td == curthread && PCPU_GET(spinlocks) != NULL)
1992		witness_list_locks(PCPU_PTR(spinlocks));
1993}
1994
1995DB_SHOW_COMMAND(locks, db_witness_list)
1996{
1997	struct thread *td;
1998
1999	if (have_addr)
2000		td = db_lookup_thread(addr, TRUE);
2001	else
2002		td = kdb_thread;
2003	witness_list(td);
2004}
2005
2006DB_SHOW_COMMAND(alllocks, db_witness_list_all)
2007{
2008	struct thread *td;
2009	struct proc *p;
2010
2011	/*
2012	 * It would be nice to list only threads and processes that actually
2013	 * held sleep locks, but that information is currently not exported
2014	 * by WITNESS.
2015	 */
2016	FOREACH_PROC_IN_SYSTEM(p) {
2017		if (!witness_proc_has_locks(p))
2018			continue;
2019		FOREACH_THREAD_IN_PROC(p, td) {
2020			if (!witness_thread_has_locks(td))
2021				continue;
2022			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2023			    td->td_name, td, td->td_tid);
2024			witness_list(td);
2025		}
2026	}
2027}
2028
2029DB_SHOW_COMMAND(witness, db_witness_display)
2030{
2031
2032	witness_display(db_printf);
2033}
2034#endif
2035