/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */

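/*
 * A minimal sketch of rule 3) in practice (illustrative only; "data_lock"
 * is a hypothetical sleepable sx lock, not something defined here).  Both
 * orders below are safe: the thread that takes Giant first drops Giant
 * while it blocks on data_lock, so the other thread can always proceed.
 */
#if 0
	sx_xlock(&data_lock);		/* sleepable lock first... */
	mtx_lock(&Giant);		/* ...then Giant: always fine. */
	mtx_unlock(&Giant);
	sx_xunlock(&data_lock);

	mtx_lock(&Giant);		/* Giant first... */
	sx_xlock(&data_lock);		/* ...blocking here releases Giant. */
	sx_xunlock(&data_lock);
	mtx_unlock(&Giant);
#endif
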
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 164159 2006-11-11 03:18:07Z kmacy $");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

/* Easier to stay with the old names. */
#define	lo_list		lo_witness_data.lod_list
#define	lo_witness	lo_witness_data.lod_witness

/* Define this to check for blessed mutexes */
#undef BLESSING

#define WITNESS_COUNT 1024
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN 6

struct witness_child_list_entry;

struct witness {
	const	char *w_name;
	struct	lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct	witness_child_list_entry *w_children;	/* Great evilness... */
	const	char *w_file;
	int	w_line;
	u_int	w_level;
	u_int	w_refcount;
	u_char	w_Giant_squawked:1;
	u_char	w_other_squawked:1;
	u_char	w_same_squawked:1;
	u_char	w_displayed:1;
};

struct witness_child_list_entry {
	struct	witness_child_list_entry *wcl_next;
	struct	witness *wcl_children[WITNESS_NCHILDREN];
	u_int	wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const	char *b_lock1;
	const	char *b_lock2;
};
#endif

struct witness_order_list_entry {
	const	char *w_name;
	struct	lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
				struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
					     struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
					   struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
				     struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  Witness may not be turned
 * back on once it has been turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
 * to drop into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ "allprison", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_mtx_sleep },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "hptlock", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of locks initialized prior to witness being initialized whose
 * enrollment is currently deferred.
 */
STAILQ_HEAD(, lock_object) pending_locks =
    STAILQ_HEAD_INITIALIZER(pending_locks);

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that early boot is single-threaded, at least until after this routine has
 * completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	while (!STAILQ_EMPTY(&pending_locks)) {
		lock = STAILQ_FIRST(&pending_locks);
		STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

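/*
 * Sysctl handler for debug.witness.watch.  Witness may be turned off at
 * runtime but never back on, so only a transition to 0 is accepted.
 */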
static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/*
	 * XXXRW: Why a priv check here?
	 */
	error = priv_check(req->td, PRIV_WITNESS);
	if (error != 0)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

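/*
 * Hook a new lock object into witness: sanity check its flags against its
 * lock class, then either enroll it now or, if witness is still cold,
 * defer enrollment to witness_initialize() via the pending_locks list.
 */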
void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch == 0 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
		lock->lo_flags |= LO_ENROLLPEND;
	} else
		lock->lo_witness = enroll(lock->lo_type, class);
}

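/*
 * Drop a lock object's witness reference, tearing down the witness itself
 * via depart() once the last reference goes away.
 */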
void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);
	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
	    lock->lo_witness != NULL) {
		w = lock->lo_witness;
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	/*
	 * If this lock is destroyed before witness is up and running,
	 * remove it from the pending list.
	 */
	if (lock->lo_flags & LO_ENROLLPEND) {
		STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
		lock->lo_flags &= ~LO_ENROLLPEND;
	}
}

#ifdef DDB
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization; technically we could get
		 * away with just walking the w_all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
			   struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
		     struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors; display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

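/*
 * A hedged usage sketch for witness_defineorder() (illustrative only;
 * "foo_mtx" and "bar_mtx" are hypothetical locks): a subsystem that knows
 * foo_mtx must always be taken before bar_mtx can declare that order up
 * front instead of waiting for witness to infer it from an acquisition.
 */
#if 0
	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
	mtx_init(&bar_mtx, "bar", NULL, MTX_DEF);
	if (witness_defineorder(&foo_mtx.mtx_object, &bar_mtx.mtx_object) != 0)
		printf("foo before bar conflicts with an existing order\n");
#endif

/*
 * Check that acquiring the specified lock now would not create a lock
 * order reversal against the locks the current thread (or CPU, for spin
 * locks) already holds, optionally recording a new ordering if
 * LOP_NEWORDER is set.
 */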
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.mtx_object)
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.mtx_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(__func__);
#endif
}

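/*
 * Record a successful lock acquisition in the owning thread's (or CPU's,
 * for spin locks) held-lock list.  Order checking has already been done
 * by witness_checkorder().
 */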
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

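/*
 * Sanity check and record a shared-to-exclusive upgrade of an upgradable
 * sleep lock; witness_downgrade() below handles the reverse transition.
 */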
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

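/*
 * Remove a lock from the owning thread's (or CPU's) held-lock list on
 * release, unrecursing first if the lock was recursively acquired.
 */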
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spin lock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}

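/*
 * A hedged usage sketch for witness_warn() (illustrative only): code that
 * is about to sleep can assert that nothing unexpected is held, panicking
 * via WARN_PANIC if the check fails.
 */
#if 0
	witness_warn(WARN_PANIC | WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "sleeping in %s", __func__);
#endif
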
const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

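/*
 * Find the witness for a lock name, bumping its reference count, or
 * allocate a fresh one if this is the first lock with that name.
 */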
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		goto out;
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
out:
	/*
	 * We issue a warning for any spin locks not defined in the static
	 * order list as a way to discourage their use (folks should really
	 * be using non-spin mutexes most of the time).  However, several
	 * 3rd party device drivers use spin locks because that is all they
	 * have available on Windows and Linux and they think that normal
	 * mutexes are insufficient.
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
		printf("WITNESS: spin lock %s not in order list\n",
		    description);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we reparent all the direct
	 * children of the outgoing witness to its parent.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		w_child_cnt--;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Insert "child" after "parent"
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
		w_child_cnt++;
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	if (!insertchild(parent, child))
		return (0);

	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	return (1);
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	w_child_cnt--;
	witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

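/*
 * Allocate a witness from the static free list.  Called with w_mtx held;
 * if the free list is exhausted, witness disables itself and drops w_mtx.
 */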
static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	w_child_free_cnt--;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
	w_child_free_cnt++;
}

static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

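/*
 * Walk a held-lock list looking for the instance that records the given
 * lock object; returns NULL if the lock is not held.
 */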
static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_type != lock->lo_name)
		printf(" (%s)", lock->lo_type);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

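/*
 * Save (and below, restore) the file and line of a lock's current
 * acquisition so callers that transiently drop and reacquire a lock can
 * preserve its original acquisition point.
 */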
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

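/*
 * Verify that the current thread holds (or does not hold) the given lock
 * in the state asserted by the LA_* flags.
 */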
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);

	}
#endif	/* INVARIANT_SUPPORT */
}

#ifdef DDB
static void
witness_list(struct thread *td)
{

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat broken
	 * if td is currently executing on some other CPU and holds spin locks
	 * as we won't display those locks.  If we had an MI way of getting
	 * the per-cpu data for a given cpu then we could use
	 * td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_list(td);
}

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_list(td);
		}
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif