subr_witness.c revision 166857
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
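
/*
 * As an illustration of rules 2) and 3) (hypothetical caller code, not an
 * API in this file), the following acquisition sequence is legal because a
 * thread that later blocks on a sleepable lock drops Giant first:
 *
 *	sx_xlock(&some_sleepable_lock);	 /- sleepable lock first	 -/
 *	mtx_lock(&Giant);		 /- Giant after it, per rule 3)	 -/
 *	...
 *	mtx_unlock(&Giant);
 *	sx_xunlock(&some_sleepable_lock);
 */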

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 166857 2007-02-20 23:49:31Z rwatson $");

#include "opt_ddb.h"
#include "opt_witness.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Note that these traces do not work with KTR_ALQ. */
#if 0
#define	KTR_WITNESS	KTR_SUBSYS
#else
#define	KTR_WITNESS	0
#endif

/* Easier to stay with the old names. */
#define	lo_list		lo_witness_data.lod_list
#define	lo_witness	lo_witness_data.lod_witness

/* Define this to check for blessed mutexes */
#undef BLESSING

#define WITNESS_COUNT 1024
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)

#define	WITNESS_NCHILDREN 6

struct witness_child_list_entry;

struct witness {
	const	char *w_name;
	struct	lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct	witness_child_list_entry *w_children;	/* Great evilness... */
	const	char *w_file;
	int	w_line;
	u_int	w_level;
	u_int	w_refcount;
	u_char	w_Giant_squawked:1;
	u_char	w_other_squawked:1;
	u_char	w_same_squawked:1;
	u_char	w_displayed:1;
};

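/*
 * Children of a witness are tracked in a linked list of fixed-size
 * arrays; each entry holds up to WITNESS_NCHILDREN child pointers
 * plus a link to the next entry.
 */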
struct witness_child_list_entry {
	struct	witness_child_list_entry *wcl_next;
	struct	witness *wcl_children[WITNESS_NCHILDREN];
	u_int	wcl_count;
};

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const	char *b_lock1;
	const	char *b_lock2;
};
#endif

struct witness_order_list_entry {
	const	char *w_name;
	struct	lock_class *w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
				struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static void	removechild(struct witness *parent, struct witness *child);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static const char *fixup_filename(const char *file);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
					     struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
					   struct witness *, int indent);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
				     struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
static void	witness_list(struct thread *td);
#endif

SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");

/*
 * If set to 0, witness is disabled.  If set to a non-zero value, witness
 * performs full lock order checking for all locks.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to
 * be turned back on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness.watch", &witness_watch);
SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");

#ifdef KDB
/*
 * When KDB is enabled and witness_kdb is set to 1, the system will drop
 * into the kernel debugger when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_KDB
int	witness_kdb = 1;
#else
int	witness_kdb = 0;
#endif
TUNABLE_INT("debug.witness.kdb", &witness_kdb);
SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");

/*
 * When KDB is enabled and witness_trace is set to 1, the system will print
 * a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness.trace", &witness_trace);
SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* KDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
    &witness_skipspin, 0, "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static int w_free_cnt, w_spin_cnt, w_sleep_cnt, w_child_free_cnt, w_child_cnt;
SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
    "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_free_cnt, CTLFLAG_RD,
    &w_child_free_cnt, 0, "");
SYSCTL_INT(_debug_witness, OID_AUTO, child_cnt, CTLFLAG_RD, &w_child_cnt, 0,
    "");

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

static struct witness_order_list_entry order_lists[] = {
	/*
	 * sx locks
	 */
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ NULL, NULL },
	/*
	 * Various mutexes
	 */
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ "allprison", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Sockets
	 */
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "accept", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "sellck", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Routing
	 */
	{ "so_rcv", &lock_class_mtx_sleep },
	{ "radix node head", &lock_class_mtx_sleep },
	{ "rtentry", &lock_class_mtx_sleep },
	{ "ifaddr", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * Multicast - protocol locks before interface locks, after UDP locks.
	 */
	{ "udpinp", &lock_class_mtx_sleep },
	{ "in_multi_mtx", &lock_class_mtx_sleep },
	{ "igmp_mtx", &lock_class_mtx_sleep },
	{ "if_addr_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UNIX Domain Sockets
	 */
	{ "unp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * UDP/IP
	 */
	{ "udp", &lock_class_mtx_sleep },
	{ "udpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * TCP/IP
	 */
	{ "tcp", &lock_class_mtx_sleep },
	{ "tcpinp", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * SLIP
	 */
	{ "slip_mtx", &lock_class_mtx_sleep },
	{ "slip sc_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * netatalk
	 */
	{ "ddp_list_mtx", &lock_class_mtx_sleep },
	{ "ddp_mtx", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * BPF
	 */
	{ "bpf global lock", &lock_class_mtx_sleep },
	{ "bpf interface lock", &lock_class_mtx_sleep },
	{ "bpf cdev lock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * NFS server
	 */
	{ "nfsd_mtx", &lock_class_mtx_sleep },
	{ "so_snd", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * CDEV
	 */
	{ "system map", &lock_class_mtx_sleep },
	{ "vm page queue mutex", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ "cdev", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * kqueue/VFS interaction
	 */
	{ "kqueue", &lock_class_mtx_sleep },
	{ "struct mount mtx", &lock_class_mtx_sleep },
	{ "vnode interlock", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#endif
	{ "rm.mutex_mtx", &lock_class_mtx_spin },
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "scc_hwmtx", &lock_class_mtx_spin },
	{ "uart_hwmtx", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "fast_taskqueue", &lock_class_mtx_spin },
	{ "intr table", &lock_class_mtx_spin },
	{ "sleepq chain", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "turnstile chain", &lock_class_mtx_spin },
	{ "td_contested", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	{ "entropy harvest mutex", &lock_class_mtx_spin },
	{ "syscons video lock", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) || defined(__amd64__)
	{ "tlb", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
	{ "rtc_mtx", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#if defined(__i386__) || defined(__amd64__)
	{ "pcicfg", &lock_class_mtx_spin },
	{ "NDIS thread lock", &lock_class_mtx_spin },
#endif
	{ "tw_osl_io_lock", &lock_class_mtx_spin },
	{ "tw_osl_q_lock", &lock_class_mtx_spin },
	{ "tw_cl_io_lock", &lock_class_mtx_spin },
	{ "tw_cl_intr_lock", &lock_class_mtx_spin },
	{ "tw_cl_gen_lock", &lock_class_mtx_spin },
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed.
 * Don't complain about order problems with blessed locks.
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif

/*
 * List of locks initialized prior to witness being initialized whose
 * enrollment is currently deferred.
 */
STAILQ_HEAD(, lock_object) pending_locks =
    STAILQ_HEAD_INITIALIZER(pending_locks);

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * This global is set to 1 once the static lock orders have been enrolled
 * so that a warning can be issued for any spin locks enrolled later.
 */
static int witness_spin_warn = 0;

/*
 * The WITNESS-enabled diagnostic code.  Note that the witness code assumes
 * that the early boot is single-threaded at least until after this routine
 * is completed.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS | MTX_NOPROFILE);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}
	witness_spin_warn = 1;

	/* Iterate through all locks and add them to witness. */
	while (!STAILQ_EMPTY(&pending_locks)) {
		lock = STAILQ_FIRST(&pending_locks);
		STAILQ_REMOVE_HEAD(&pending_locks, lo_list);
		KASSERT(lock->lo_flags & LO_WITNESS,
		    ("%s: lock %s is on pending list but not LO_WITNESS",
		    __func__, lock->lo_name));
		lock->lo_witness = enroll(lock->lo_type, LOCK_CLASS(lock));
	}

	/* Mark the witness code as being ready for use. */
	witness_cold = 0;

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)

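/*
 * Sysctl handler for debug.witness.watch.  Witness may be turned off at
 * runtime by writing 0, but once off it may never be re-enabled, so any
 * non-zero new value is rejected with EINVAL.
 */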
static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	/* Various sanity checks. */
	class = LOCK_CLASS(lock);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	/*
	 * If we shouldn't watch this lock, then just clear lo_witness.
	 * Otherwise, if witness_cold is set, then it is too early to
	 * enroll this lock, so defer it to witness_initialize() by adding
	 * it to the pending_locks list.  If it is not too early, then enroll
	 * the lock now.
	 */
	if (witness_watch == 0 || panicstr != NULL ||
	    (lock->lo_flags & LO_WITNESS) == 0)
		lock->lo_witness = NULL;
	else if (witness_cold) {
		STAILQ_INSERT_TAIL(&pending_locks, lock, lo_list);
		lock->lo_flags |= LO_ENROLLPEND;
	} else
		lock->lo_witness = enroll(lock->lo_type, class);
}

void
witness_destroy(struct lock_object *lock)
{
	struct lock_class *class;
	struct witness *w;

	class = LOCK_CLASS(lock);
	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	if ((lock->lo_flags & (LO_WITNESS | LO_ENROLLPEND)) == LO_WITNESS &&
	    lock->lo_witness != NULL) {
		w = lock->lo_witness;
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * w_mtx is already released if we have an allocation
		 * failure and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	/*
	 * If this lock is destroyed before witness is up and running,
	 * remove it from the pending list.
	 */
	if (lock->lo_flags & LO_ENROLLPEND) {
		STAILQ_REMOVE(&pending_locks, lock, lock_object, lo_list);
		lock->lo_flags &= ~LO_ENROLLPEND;
	}
}

#ifdef DDB
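/*
 * Recompute the w_level field of every witness for the hierarchy display:
 * a witness's level is the length of the longest path down to it from a
 * parentless (root) witness.
 */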
static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization; technically we could get
		 * away with just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
			   struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}

static void
witness_display_list(void(*prnt)(const char *fmt, ...),
		     struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}

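/*
 * Explicitly define an order between two locks that witness knows about.
 * Fails with EDOOFUS if the reverse order is already known, explicitly or
 * implied, and with ENOMEM if the new order cannot be recorded.  Note that
 * on the ENOMEM path w_mtx has already been dropped by the failed child
 * allocation.
 */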
int
witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
{

	if (witness_watch == 0 || panicstr != NULL)
		return (0);

	/* Require locks that witness knows about. */
	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
	    lock2->lo_witness == NULL)
		return (EINVAL);

	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);

	/*
	 * If we already have either an explicit or implied lock order that
	 * is the other way around, then return an error.
	 */
	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
		mtx_unlock_spin(&w_mtx);
		return (EDOOFUS);
	}

	/* Try to add the new order. */
	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
	    lock2->lo_type, lock1->lo_type);
	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
		return (ENOMEM);
	mtx_unlock_spin(&w_mtx);
	return (0);
}

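/*
 * Check that acquiring 'lock' is consistent with the lock order
 * established so far by this thread (or CPU, for spin locks).  On a
 * violation, complain once per witness and optionally drop into the
 * debugger; otherwise, optionally record the new ordering.
 */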
void
witness_checkorder(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.  This
	 * function shouldn't even be called for try locks, so panic if
	 * that happens.
	 */
	if (flags & LOP_TRYLOCK)
		panic("%s should not be called for try lock operations",
		    __func__);

	w = lock->lo_witness;
	class = LOCK_CLASS(lock);
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && !kdb_active)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);

		/*
		 * If this is the first lock acquired then just return as
		 * no order checking is needed.
		 */
		if (td->td_sleeplocks == NULL)
			return;
		lock_list = &td->td_sleeplocks;
	} else {
		/*
		 * If this is the first lock, just return as no order
		 * checking is needed.  We check this in both if clauses
		 * here as unifying the check would require us to use a
		 * critical section to ensure we don't migrate while doing
		 * the check.  Note that if this is not the first lock, we
		 * are already in a critical section and are safe for the
		 * rest of the check.
		 */
		if (PCPU_GET(spinlocks) == NULL)
			return;
		lock_list = PCPU_PTR(spinlocks);
	}

	/*
	 * Check to see if we are recursing on a lock we already own.  If
	 * so, make sure that we don't mismatch exclusive and shared lock
	 * acquires.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		return;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK) ||
		    (flags & LOP_DUPOK))
			return;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
			lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef KDB
		goto debugger;
#else
		return;
#endif
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmychild(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		return;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				goto reversal;
			/*
			 * If we are locking Giant and this is a non-sleepable
			 * lock, then treat it as a reversal.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
			    lock == &Giant.mtx_object)
				goto reversal;
			/*
			 * Check the lock order hierarchy for a reversal.
			 */
			if (!isitmydescendant(w, w1))
				continue;
		reversal:
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			/*
			 * If the lock order is blessed, just bail.  We don't
			 * look for other lock order violations though, which
			 * may be a bug.
			 */
			if (blessed(w, w1))
				return;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					return;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					return;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				printf(
		"lock order reversal: (sleepable after non-sleepable)\n");
			else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
			    && lock == &Giant.mtx_object)
				printf(
		"lock order reversal: (Giant after non-sleepable)\n");
			else
				printf("lock order reversal:\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				} else
					i--;
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef KDB
			goto debugger;
#else
			return;
#endif
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * If requested, build a new lock order.  However, don't build a new
	 * relationship between a sleepable lock and Giant if it is in the
	 * wrong direction.  The correct lock order is that sleepable locks
	 * always come before Giant.
	 */
	if (flags & LOP_NEWORDER &&
	    !(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);
	return;

#ifdef KDB
debugger:
	if (witness_trace)
		kdb_backtrace();
	if (witness_kdb)
		kdb_enter(__func__);
#endif
}

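/*
 * Record the acquisition of 'lock' in the owner's lock list, either
 * bumping the recursion count of an existing instance or filling in a
 * new lock instance.
 */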
void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct witness *w;
	struct thread *td;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	td = curthread;
	file = fixup_filename(file);

	/* Determine lock list for this lock. */
	if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);

	/* Check to see if we are recursing on a lock we already own. */
	instance = find_instance(*lock_list, lock);
	if (instance != NULL) {
		instance->li_flags++;
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK);
		instance->li_file = file;
		instance->li_line = line;
		return;
	}

	/* Update per-witness last file and line acquire. */
	w->w_file = file;
	w->w_line = line;

	/* Find the next open lock instance in the list and fill it. */
	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	instance = &lle->ll_children[lle->ll_count++];
	instance->li_lock = lock;
	instance->li_line = line;
	instance->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		instance->li_flags = LI_EXCLUSIVE;
	else
		instance->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

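/*
 * Check and record the release of 'lock': catch shared vs. exclusive
 * unlock mismatches, unrecurse recursed locks, and otherwise remove the
 * instance from the owner's lock list.
 */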
void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = LOCK_CLASS(lock);
	file = fixup_filename(file);

	/* Find lock instance associated with this lock. */
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock)
				goto found;
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
found:

	/* First, check for shared/exclusive mismatches. */
	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
	    (flags & LOP_EXCLUSIVE) == 0) {
		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while exclusively locked from %s:%d\n",
		    instance->li_file, instance->li_line);
		panic("excl->ushare");
	}
	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
	    (flags & LOP_EXCLUSIVE) != 0) {
		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
		    lock->lo_name, file, line);
		printf("while share locked from %s:%d\n", instance->li_file,
		    instance->li_line);
		panic("share->uexcl");
	}

	/* If we are recursed, unrecurse. */
	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
		    td->td_proc->p_pid, instance->li_lock->lo_name,
		    instance->li_flags);
		instance->li_flags--;
		return;
	}

	/* Otherwise, remove this item from the list. */
	s = intr_disable();
	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
	    td->td_proc->p_pid, instance->li_lock->lo_name,
	    (*lock_list)->ll_count - 1);
	for (j = i; j < (*lock_list)->ll_count - 1; j++)
		(*lock_list)->ll_children[j] =
		    (*lock_list)->ll_children[j + 1];
	(*lock_list)->ll_count--;
	intr_restore(s);

	/* If this lock list entry is now empty, free it. */
	if ((*lock_list)->ll_count == 0) {
		lle = *lock_list;
		*lock_list = lle->ll_next;
		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
		    td->td_proc->p_pid, lle);
		witness_lock_list_free(lle);
	}
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spin lock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef KDB
	else if (witness_kdb && n)
		kdb_enter(__func__);
	else if (witness_trace && n)
		kdb_backtrace();
#endif
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

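/*
 * Look up the witness matching 'description' and 'lock_class', allocating
 * and enrolling a new one if no match is found.  Returns NULL if witness
 * is disabled or exhausted, or for spin locks when witness_skipspin is
 * set.
 */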
static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	if ((w = witness_get()) == NULL)
		goto out;
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK) {
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
		w_spin_cnt++;
	} else if (lock_class->lc_flags & LC_SLEEPLOCK) {
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
		w_sleep_cnt++;
	} else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
out:
	/*
	 * We issue a warning for any spin locks not defined in the static
	 * order list as a way to discourage their use (folks should really
	 * be using non-spin mutexes most of the time).  However, several
	 * third-party device drivers use spin locks because that is all
	 * they have available on Windows and Linux and they think that
	 * normal mutexes are insufficient.
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_spin_warn)
		printf("WITNESS: spin lock %s not in order list\n",
		    description);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK) {
		list = &w_sleep;
		w_sleep_cnt--;
	} else {
		list = &w_spin;
		w_spin_cnt--;
	}
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we remove the outgoing witness
	 * from that parent's child lists.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		w_child_cnt--;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	return (1);
}

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Insert "child" into "parent"'s child list.
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
		w_child_cnt++;
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	return (insertchild(parent, child));
}

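/*
 * Remove 'child' from 'parent's child lists, compacting the array entry
 * it occupied and freeing the list entry if it becomes empty.
 */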
static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	w_child_cnt--;
	witness_child_free(wcl1);
}

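/* Is 'child' a direct child of 'parent'? */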
static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

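/*
 * Is 'child' a descendant of 'parent' (including a direct child)?  This
 * walks the child lists recursively.
 */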
static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif

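/*
 * Allocate a witness from the static pool.  Expects w_mtx to be held; on
 * failure w_mtx is dropped before returning NULL, so callers must not
 * unlock it again on that path.
 */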
static struct witness *
witness_get(void)
{
	struct witness *w;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	if (STAILQ_EMPTY(&w_free)) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w = STAILQ_FIRST(&w_free);
	STAILQ_REMOVE_HEAD(&w_free, w_list);
	w_free_cnt--;
	bzero(w, sizeof(*w));
	return (w);
}

static void
witness_free(struct witness *w)
{

	STAILQ_INSERT_HEAD(&w_free, w, w_list);
	w_free_cnt++;
}

static struct witness_child_list_entry *
witness_child_get(void)
{
	struct witness_child_list_entry *wcl;

	if (witness_watch == 0) {
		mtx_unlock_spin(&w_mtx);
		return (NULL);
	}
	wcl = w_child_free;
	if (wcl == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_child_free = wcl->wcl_next;
	w_child_free_cnt--;
	bzero(wcl, sizeof(*wcl));
	return (wcl);
}

static void
witness_child_free(struct witness_child_list_entry *wcl)
{

	wcl->wcl_next = w_child_free;
	w_child_free = wcl;
	w_child_free_cnt++;
}

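/*
 * Allocate a lock list entry from the static pool, disabling witness if
 * the pool is exhausted.  Unlike witness_get(), this acquires and
 * releases w_mtx itself.
 */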
static struct lock_list_entry *
witness_lock_list_get(void)
{
	struct lock_list_entry *lle;

	if (witness_watch == 0)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	lle = w_lock_list_free;
	if (lle == NULL) {
		witness_watch = 0;
		mtx_unlock_spin(&w_mtx);
		printf("%s: witness exhausted\n", __func__);
		return (NULL);
	}
	w_lock_list_free = lle->ll_next;
	mtx_unlock_spin(&w_mtx);
	bzero(lle, sizeof(*lle));
	return (lle);
}

static void
witness_lock_list_free(struct lock_list_entry *lle)
{

	mtx_lock_spin(&w_mtx);
	lle->ll_next = w_lock_list_free;
	w_lock_list_free = lle;
	mtx_unlock_spin(&w_mtx);
}

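/*
 * Find the lock instance tracking 'lock' in the given lock list, or
 * return NULL if this lock is not currently held.
 */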
static struct lock_instance *
find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
{
	struct lock_list_entry *lle;
	struct lock_instance *instance;
	int i;

	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			instance = &lle->ll_children[i];
			if (instance->li_lock == lock)
				return (instance);
		}
	return (NULL);
}

static void
witness_list_lock(struct lock_instance *instance)
{
	struct lock_object *lock;

	lock = instance->li_lock;
	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
	    "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
	if (lock->lo_type != lock->lo_name)
		printf(" (%s)", lock->lo_type);
	printf(" r = %d (%p) locked @ %s:%d\n",
	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
	    instance->li_line);
}

#ifdef DDB
static int
witness_thread_has_locks(struct thread *td)
{

	return (td->td_sleeplocks != NULL);
}

static int
witness_proc_has_locks(struct proc *p)
{
	struct thread *td;

	FOREACH_THREAD_IN_PROC(p, td) {
		if (witness_thread_has_locks(td))
			return (1);
	}
	return (0);
}
#endif

int
witness_list_locks(struct lock_list_entry **lock_list)
{
	struct lock_list_entry *lle;
	int i, nheld;

	nheld = 0;
	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			witness_list_lock(&lle->ll_children[i]);
			nheld++;
		}
	return (nheld);
}

/*
 * This is a bit risky at best.  We call this function when we have timed
 * out acquiring a spin lock, and we assume that the other CPU is stuck
 * with this lock held.  So, we go groveling around in the other CPU's
 * per-cpu data to try to find the lock instance for this spin lock to
 * see when it was last acquired.
 */
void
witness_display_spinlock(struct lock_object *lock, struct thread *owner)
{
	struct lock_instance *instance;
	struct pcpu *pc;

	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
		return;
	pc = pcpu_find(owner->td_oncpu);
	instance = find_instance(pc->pc_spinlocks, lock);
	if (instance != NULL)
		witness_list_lock(instance);
}

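/*
 * witness_save() and witness_restore() stash and reinstate the file/line
 * where a held lock was acquired, for callers that must temporarily drop
 * and reacquire a lock without disturbing the recorded acquisition point.
 */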
void
witness_save(struct lock_object *lock, const char **filep, int *linep)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	*filep = instance->li_file;
	*linep = instance->li_line;
}

void
witness_restore(struct lock_object *lock, const char *file, int line)
{
	struct lock_list_entry *lock_list;
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = curthread->td_sleeplocks;
	else {
		if (witness_skipspin)
			return;
		lock_list = PCPU_GET(spinlocks);
	}
	instance = find_instance(lock_list, lock);
	if (instance == NULL)
		panic("%s: lock (%s) %s not locked", __func__,
		    class->lc_name, lock->lo_name);
	lock->lo_witness->w_file = file;
	lock->lo_witness->w_line = line;
	instance->li_file = file;
	instance->li_line = line;
}

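/*
 * Assert (under INVARIANT_SUPPORT) that 'lock' is in the state described
 * by 'flags': unlocked, locked, share locked, or exclusively locked,
 * possibly combined with recursed or not recursed.  Panics on mismatch.
 */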
void
witness_assert(struct lock_object *lock, int flags, const char *file, int line)
{
#ifdef INVARIANT_SUPPORT
	struct lock_instance *instance;
	struct lock_class *class;

	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = LOCK_CLASS(lock);
	if ((class->lc_flags & LC_SLEEPLOCK) != 0)
		instance = find_instance(curthread->td_sleeplocks, lock);
	else if ((class->lc_flags & LC_SPINLOCK) != 0)
		instance = find_instance(PCPU_GET(spinlocks), lock);
	else {
		panic("Lock (%s) %s is not sleep or spin!",
		    class->lc_name, lock->lo_name);
	}
	file = fixup_filename(file);
	switch (flags) {
	case LA_UNLOCKED:
		if (instance != NULL)
			panic("Lock (%s) %s locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	case LA_LOCKED:
	case LA_LOCKED | LA_RECURSED:
	case LA_LOCKED | LA_NOTRECURSED:
	case LA_SLOCKED:
	case LA_SLOCKED | LA_RECURSED:
	case LA_SLOCKED | LA_NOTRECURSED:
	case LA_XLOCKED:
	case LA_XLOCKED | LA_RECURSED:
	case LA_XLOCKED | LA_NOTRECURSED:
		if (instance == NULL) {
			panic("Lock (%s) %s not locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
			break;
		}
		if ((flags & LA_XLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) == 0)
			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_SLOCKED) != 0 &&
		    (instance->li_flags & LI_EXCLUSIVE) != 0)
			panic("Lock (%s) %s exclusively locked @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_RECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) == 0)
			panic("Lock (%s) %s not recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		if ((flags & LA_NOTRECURSED) != 0 &&
		    (instance->li_flags & LI_RECURSEMASK) != 0)
			panic("Lock (%s) %s recursed @ %s:%d.",
			    class->lc_name, lock->lo_name, file, line);
		break;
	default:
		panic("Invalid lock assertion at %s:%d.", file, line);
	}
#endif	/* INVARIANT_SUPPORT */
}

#ifdef DDB
static void
witness_list(struct thread *td)
{

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	KASSERT(kdb_active, ("%s: not in the debugger", __func__));

	if (witness_watch == 0)
		return;

	witness_list_locks(&td->td_sleeplocks);

	/*
	 * We only handle spinlocks if td == curthread.  This is somewhat
	 * broken if td is currently executing on some other CPU and holds
	 * spin locks, as we won't display those locks.  If we had an MI
	 * way of getting the per-cpu data for a given cpu then we could
	 * use td->td_oncpu to get the list of spinlocks for this thread
	 * and "fix" this.
	 *
	 * That still wouldn't really fix this unless we locked sched_lock
	 * or stopped the other CPU to make sure it wasn't changing the list
	 * out from under us.  It is probably best to just not try to handle
	 * threads on other CPUs for now.
	 */
	if (td == curthread && PCPU_GET(spinlocks) != NULL)
		witness_list_locks(PCPU_PTR(spinlocks));
}

DB_SHOW_COMMAND(locks, db_witness_list)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;
	witness_list(td);
}

DB_SHOW_COMMAND(alllocks, db_witness_list_all)
{
	struct thread *td;
	struct proc *p;

	/*
	 * It would be nice to list only threads and processes that actually
	 * held sleep locks, but that information is currently not exported
	 * by WITNESS.
	 */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!witness_proc_has_locks(p))
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (!witness_thread_has_locks(td))
				continue;
			db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
			    p->p_comm, td, td->td_tid);
			witness_list(td);
		}
	}
}

DB_SHOW_COMMAND(witness, db_witness_display)
{

	witness_display(db_printf);
}
#endif
