1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 *    promote products derived from this software without specific prior
14 *    written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 */
31
32/*
33 * Implementation of the `witness' lock verifier.  Originally implemented for
34 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
35 * classes in FreeBSD.
36 */
37
38/*
39 *	Main Entry: witness
40 *	Pronunciation: 'wit-n&s
41 *	Function: noun
42 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
43 *	    testimony, witness, from 2wit
44 *	Date: before 12th century
45 *	1 : attestation of a fact or event : TESTIMONY
46 *	2 : one that gives evidence; specifically : one who testifies in
47 *	    a cause or before a judicial tribunal
48 *	3 : one asked to be present at a transaction so as to be able to
49 *	    testify to its having taken place
50 *	4 : one who has personal knowledge of something
51 *	5 a : something serving as evidence or proof : SIGN
52 *	  b : public affirmation by word or example of usually
53 *	      religious faith or conviction <the heroic witness to divine
54 *	      life -- Pilot>
55 *	6 capitalized : a member of the Jehovah's Witnesses
56 */
57
58/*
59 * Special rules concerning Giant and lock orders:
60 *
61 * 1) Giant must be acquired before any other mutexes.  Stated another way,
62 *    no other mutex may be held when Giant is acquired.
63 *
64 * 2) Giant must be released when blocking on a sleepable lock.
65 *
66 * This rule is less obvious, but is a result of Giant providing the same
67 * semantics as spl().  Basically, when a thread sleeps, it must release
68 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
69 * 2).
70 *
71 * 3) Giant may be acquired before or after sleepable locks.
72 *
73 * This rule is also not quite as obvious.  Giant may be acquired after
74 * a sleepable lock because it is a non-sleepable lock and non-sleepable
75 * locks may always be acquired while holding a sleepable lock.  The second
76 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
77 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
78 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
79 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
80 * execute.  Thus, acquiring Giant both before and after a sleepable lock
81 * will not result in a lock order reversal.
82 */
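
/*
 * To make rule 3) concrete, here is a hedged sketch; the sx lock and its
 * name are invented for illustration and are not part of this file.  Both
 * of the following acquisition orders are accepted by witness:
 *
 *	sx_xlock(&example_sx);
 *	mtx_lock(&Giant);
 *
 * and
 *
 *	mtx_lock(&Giant);
 *	sx_xlock(&example_sx);
 *
 * In the second case Giant is released if the thread has to sleep on the
 * sx lock, so no deadlock results.  Acquiring any other (non-sleepable)
 * mutex before Giant, however, violates rule 1) and is reported as a lock
 * order reversal.
 */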
83
84#include <sys/cdefs.h>
85__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 140637 2005-01-22 21:14:21Z rwatson $");
86
87#include "opt_ddb.h"
88#include "opt_witness.h"
89
90#include <sys/param.h>
91#include <sys/bus.h>
92#include <sys/kdb.h>
93#include <sys/kernel.h>
94#include <sys/ktr.h>
95#include <sys/lock.h>
96#include <sys/malloc.h>
97#include <sys/mutex.h>
98#include <sys/proc.h>
99#include <sys/sysctl.h>
100#include <sys/systm.h>
101
102#include <ddb/ddb.h>
103
104#include <machine/stdarg.h>
105
106/* Define this to check for blessed mutexes */
107#undef BLESSING
108
109#define WITNESS_COUNT 1024
110#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
111/*
112 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
113 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
114 * probably be safe for the most part, but it's still a SWAG.
115 */
116#define LOCK_CHILDCOUNT (MAXCPU + 1024) * 2
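
/*
 * As a worked example of the sizing above (the MAXCPU value is only an
 * assumption for illustration): a kernel built with MAXCPU == 32 gets
 *
 *	LOCK_CHILDCOUNT = (32 + 1024) * 2 = 2112
 *
 * entries in w_locklistdata below, shared between the per-thread sleep
 * lock lists and the per-CPU spin lock lists.
 */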
117
118#define	WITNESS_NCHILDREN 6
119
120struct witness_child_list_entry;
121
122struct witness {
123	const	char *w_name;
124	struct	lock_class *w_class;
125	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
126	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
127	struct	witness_child_list_entry *w_children;	/* Great evilness... */
128	const	char *w_file;
129	int	w_line;
130	u_int	w_level;
131	u_int	w_refcount;
132	u_char	w_Giant_squawked:1;
133	u_char	w_other_squawked:1;
134	u_char	w_same_squawked:1;
135	u_char	w_displayed:1;
136};
137
138struct witness_child_list_entry {
139	struct	witness_child_list_entry *wcl_next;
140	struct	witness *wcl_children[WITNESS_NCHILDREN];
141	u_int	wcl_count;
142};
143
144STAILQ_HEAD(witness_list, witness);
145
146#ifdef BLESSING
147struct witness_blessed {
148	const	char *b_lock1;
149	const	char *b_lock2;
150};
151#endif
152
153struct witness_order_list_entry {
154	const	char *w_name;
155	struct	lock_class *w_class;
156};
157
158#ifdef BLESSING
159static int	blessed(struct witness *, struct witness *);
160#endif
161static int	depart(struct witness *w);
162static struct	witness *enroll(const char *description,
163				struct lock_class *lock_class);
164static int	insertchild(struct witness *parent, struct witness *child);
165static int	isitmychild(struct witness *parent, struct witness *child);
166static int	isitmydescendant(struct witness *parent, struct witness *child);
167static int	itismychild(struct witness *parent, struct witness *child);
168static int	rebalancetree(struct witness_list *list);
169static void	removechild(struct witness *parent, struct witness *child);
170static int	reparentchildren(struct witness *newparent,
171		    struct witness *oldparent);
172static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
173static void	witness_displaydescendants(void(*)(const char *fmt, ...),
174					   struct witness *, int indent);
175static const char *fixup_filename(const char *file);
176static void	witness_leveldescendents(struct witness *parent, int level);
177static void	witness_levelall(void);
178static struct	witness *witness_get(void);
179static void	witness_free(struct witness *m);
180static struct	witness_child_list_entry *witness_child_get(void);
181static void	witness_child_free(struct witness_child_list_entry *wcl);
182static struct	lock_list_entry *witness_lock_list_get(void);
183static void	witness_lock_list_free(struct lock_list_entry *lle);
184static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
185					     struct lock_object *lock);
186static void	witness_list_lock(struct lock_instance *instance);
187#ifdef DDB
188static void	witness_list(struct thread *td);
189static void	witness_display_list(void(*prnt)(const char *fmt, ...),
190				     struct witness_list *list);
191static void	witness_display(void(*)(const char *fmt, ...));
192#endif
193
194SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, 0, "Witness Locking");
195
196/*
197 * If set to 0, witness is disabled.  If set to 1, witness performs full lock
198 * order checking for all locks.  If set to 2 or higher, then witness skips
199 * the full lock order check if the lock being acquired is at a higher level
200 * (i.e. farther down in the tree) than the current lock.  This last mode is
201 * somewhat experimental and not considered fully safe.  At runtime, this
202 * value may be set to 0 to turn off witness.  witness may not be turned
203 * back on once it has been turned off, however.
204 */
205static int witness_watch = 1;
206TUNABLE_INT("debug.witness.watch", &witness_watch);
207SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
208    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
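
/*
 * Usage sketch (relying on the standard loader(8) tunable and sysctl(8)
 * mechanisms, not on anything defined in this file): witness may be
 * disabled at boot or at run time with
 *
 *	debug.witness.watch=0		(in /boot/loader.conf)
 *	sysctl debug.witness.watch=0	(as root, once booted)
 *
 * Setting the value back to a non-zero value afterwards is rejected by
 * sysctl_debug_witness_watch() below with EINVAL.
 */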
209
210#ifdef KDB
211/*
212 * When KDB is enabled and witness_kdb is set to 1, it will cause the system
213 * to drop into the kernel debugger when:
214 *	- a lock hierarchy violation occurs
215 *	- locks are held when going to sleep.
216 */
217#ifdef WITNESS_KDB
218int	witness_kdb = 1;
219#else
220int	witness_kdb = 0;
221#endif
222TUNABLE_INT("debug.witness.kdb", &witness_kdb);
223SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
224
225/*
226 * When KDB is enabled and witness_trace is set to 1, it will cause the system
227 * to print a stack trace when:
228 *	- a lock hierarchy violation occurs
229 *	- locks are held when going to sleep.
230 */
231int	witness_trace = 1;
232TUNABLE_INT("debug.witness.trace", &witness_trace);
233SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
234#endif /* KDB */
235
236#ifdef WITNESS_SKIPSPIN
237int	witness_skipspin = 1;
238#else
239int	witness_skipspin = 0;
240#endif
241TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
242SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN,
243    &witness_skipspin, 0, "");
244
245static struct mtx w_mtx;
246static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
247static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
248static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
249static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
250static struct witness_child_list_entry *w_child_free = NULL;
251static struct lock_list_entry *w_lock_list_free = NULL;
252
253static struct witness w_data[WITNESS_COUNT];
254static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
255static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
256
257static struct witness_order_list_entry order_lists[] = {
258	{ "proctree", &lock_class_sx },
259	{ "allproc", &lock_class_sx },
260	{ "Giant", &lock_class_mtx_sleep },
261	{ "filedesc structure", &lock_class_mtx_sleep },
262	{ "pipe mutex", &lock_class_mtx_sleep },
263	{ "sigio lock", &lock_class_mtx_sleep },
264	{ "process group", &lock_class_mtx_sleep },
265	{ "process lock", &lock_class_mtx_sleep },
266	{ "session", &lock_class_mtx_sleep },
267	{ "uidinfo hash", &lock_class_mtx_sleep },
268	{ "uidinfo struct", &lock_class_mtx_sleep },
269	{ "allprison", &lock_class_mtx_sleep },
270	{ NULL, NULL },
271	/*
272	 * Sockets
273	 */
274	{ "filedesc structure", &lock_class_mtx_sleep },
275	{ "accept", &lock_class_mtx_sleep },
276	{ "so_snd", &lock_class_mtx_sleep },
277	{ "so_rcv", &lock_class_mtx_sleep },
278	{ "sellck", &lock_class_mtx_sleep },
279	{ NULL, NULL },
280	/*
281	 * Routing
282	 */
283	{ "so_rcv", &lock_class_mtx_sleep },
284	{ "radix node head", &lock_class_mtx_sleep },
285	{ "rtentry", &lock_class_mtx_sleep },
286	{ "ifaddr", &lock_class_mtx_sleep },
287	{ NULL, NULL },
288	/*
289	 * UNIX Domain Sockets
290	 */
291	{ "unp", &lock_class_mtx_sleep },
292	{ "so_snd", &lock_class_mtx_sleep },
293	{ NULL, NULL },
294	/*
295	 * UDP/IP
296	 */
297	{ "udp", &lock_class_mtx_sleep },
298	{ "udpinp", &lock_class_mtx_sleep },
299	{ "so_snd", &lock_class_mtx_sleep },
300	{ NULL, NULL },
301	/*
302	 * TCP/IP
303	 */
304	{ "tcp", &lock_class_mtx_sleep },
305	{ "tcpinp", &lock_class_mtx_sleep },
306	{ "so_snd", &lock_class_mtx_sleep },
307	{ NULL, NULL },
308	/*
309	 * SLIP
310	 */
311	{ "slip_mtx", &lock_class_mtx_sleep },
312	{ "slip sc_mtx", &lock_class_mtx_sleep },
313	{ NULL, NULL },
314	/*
315	 * netatalk
316	 */
317	{ "ddp_list_mtx", &lock_class_mtx_sleep },
318	{ "ddp_mtx", &lock_class_mtx_sleep },
319	{ NULL, NULL },
320	/*
321	 * BPF
322	 */
323	{ "bpf global lock", &lock_class_mtx_sleep },
324	{ "bpf interface lock", &lock_class_mtx_sleep },
325	{ "bpf cdev lock", &lock_class_mtx_sleep },
326	{ NULL, NULL },
327	/*
328	 * spin locks
329	 */
330#ifdef SMP
331	{ "ap boot", &lock_class_mtx_spin },
332#endif
333	{ "sio", &lock_class_mtx_spin },
334#ifdef __i386__
335	{ "cy", &lock_class_mtx_spin },
336#endif
337	{ "uart_hwmtx", &lock_class_mtx_spin },
338	{ "sabtty", &lock_class_mtx_spin },
339	{ "zstty", &lock_class_mtx_spin },
340	{ "ng_node", &lock_class_mtx_spin },
341	{ "ng_worklist", &lock_class_mtx_spin },
342	{ "taskqueue_fast", &lock_class_mtx_spin },
343	{ "intr table", &lock_class_mtx_spin },
344	{ "ithread table lock", &lock_class_mtx_spin },
345	{ "sleepq chain", &lock_class_mtx_spin },
346	{ "sched lock", &lock_class_mtx_spin },
347	{ "turnstile chain", &lock_class_mtx_spin },
348	{ "td_contested", &lock_class_mtx_spin },
349	{ "callout", &lock_class_mtx_spin },
350	{ "entropy harvest mutex", &lock_class_mtx_spin },
351	/*
352	 * leaf locks
353	 */
354	{ "allpmaps", &lock_class_mtx_spin },
355	{ "vm page queue free mutex", &lock_class_mtx_spin },
356	{ "icu", &lock_class_mtx_spin },
357#ifdef SMP
358	{ "smp rendezvous", &lock_class_mtx_spin },
359#if defined(__i386__) || defined(__amd64__)
360	{ "tlb", &lock_class_mtx_spin },
361#endif
362#ifdef __sparc64__
363	{ "ipi", &lock_class_mtx_spin },
364#endif
365#endif
366	{ "clk", &lock_class_mtx_spin },
367	{ "mutex profiling lock", &lock_class_mtx_spin },
368	{ "kse zombie lock", &lock_class_mtx_spin },
369	{ "ALD Queue", &lock_class_mtx_spin },
370#ifdef __ia64__
371	{ "MCA spin lock", &lock_class_mtx_spin },
372#endif
373#if defined(__i386__) || defined(__amd64__)
374	{ "pcicfg", &lock_class_mtx_spin },
375#endif
376	{ NULL, NULL },
377	{ NULL, NULL }
378};
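
/*
 * witness_initialize() reads the table above one NULL-terminated group at
 * a time, treating earlier entries in a group as parents of the later
 * ones; the extra { NULL, NULL } entry terminates the table itself.  A
 * hypothetical new group (the lock names are invented) would look like:
 *
 *	{ "example driver list lock", &lock_class_mtx_sleep },
 *	{ "example driver softc lock", &lock_class_mtx_sleep },
 *	{ NULL, NULL },
 */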
379
380#ifdef BLESSING
381/*
382 * Pairs of locks which have been blessed
383 * Don't complain about order problems with blessed locks
384 */
385static struct witness_blessed blessed_list[] = {
386};
387static int blessed_count =
388	sizeof(blessed_list) / sizeof(struct witness_blessed);
389#endif
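
/*
 * When BLESSING is defined, a pair of locks that is known to be taken in
 * both orders can be exempted by adding it to blessed_list above.  A
 * hypothetical entry (the lock type names are invented):
 *
 *	{ "example lock A", "example lock B" },
 *
 * blessed() then suppresses the lock order reversal report for that pair,
 * in either order.
 */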
390
391/*
392 * List of all locks in the system.
393 */
394TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
395
396static struct mtx all_mtx = {
397	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
398	  "All locks list",		/* mtx_object.lo_name */
399	  "All locks list",		/* mtx_object.lo_type */
400	  LO_INITIALIZED,		/* mtx_object.lo_flags */
401	  { NULL, NULL },		/* mtx_object.lo_list */
402	  NULL },			/* mtx_object.lo_witness */
403	MTX_UNOWNED, 0			/* mtx_lock, mtx_recurse */
404};
405
406/*
407 * This global is set to 0 once it becomes safe to use the witness code.
408 */
409static int witness_cold = 1;
410
411/*
412 * Global variables for bookkeeping.
413 */
414static int lock_cur_cnt;
415static int lock_max_cnt;
416
417/*
418 * The WITNESS-enabled diagnostic code.
419 */
420static void
421witness_initialize(void *dummy __unused)
422{
423	struct lock_object *lock;
424	struct witness_order_list_entry *order;
425	struct witness *w, *w1;
426	int i;
427
428	/*
429	 * We have to release Giant before initializing its witness
430	 * structure so that WITNESS doesn't get confused.
431	 */
432	mtx_unlock(&Giant);
433	mtx_assert(&Giant, MA_NOTOWNED);
434
435	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
436	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
437	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
438	    MTX_NOWITNESS);
439	for (i = 0; i < WITNESS_COUNT; i++)
440		witness_free(&w_data[i]);
441	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
442		witness_child_free(&w_childdata[i]);
443	for (i = 0; i < LOCK_CHILDCOUNT; i++)
444		witness_lock_list_free(&w_locklistdata[i]);
445
446	/* First add in all the specified order lists. */
447	for (order = order_lists; order->w_name != NULL; order++) {
448		w = enroll(order->w_name, order->w_class);
449		if (w == NULL)
450			continue;
451		w->w_file = "order list";
452		for (order++; order->w_name != NULL; order++) {
453			w1 = enroll(order->w_name, order->w_class);
454			if (w1 == NULL)
455				continue;
456			w1->w_file = "order list";
457			if (!itismychild(w, w1))
458				panic("Not enough memory for static orders!");
459			w = w1;
460		}
461	}
462
463	/* Iterate through all locks and add them to witness. */
464	mtx_lock(&all_mtx);
465	TAILQ_FOREACH(lock, &all_locks, lo_list) {
466		if (lock->lo_flags & LO_WITNESS)
467			lock->lo_witness = enroll(lock->lo_type,
468			    lock->lo_class);
469		else
470			lock->lo_witness = NULL;
471	}
472	mtx_unlock(&all_mtx);
473
474	/* Mark the witness code as being ready for use. */
475	atomic_store_rel_int(&witness_cold, 0);
476
477	mtx_lock(&Giant);
478}
479SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
480
481static int
482sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
483{
484	int error, value;
485
486	value = witness_watch;
487	error = sysctl_handle_int(oidp, &value, 0, req);
488	if (error != 0 || req->newptr == NULL)
489		return (error);
490	error = suser(req->td);
491	if (error != 0)
492		return (error);
493	if (value == witness_watch)
494		return (0);
495	if (value != 0)
496		return (EINVAL);
497	witness_watch = 0;
498	return (0);
499}
500
501void
502witness_init(struct lock_object *lock)
503{
504	struct lock_class *class;
505
506	class = lock->lo_class;
507	if (lock->lo_flags & LO_INITIALIZED)
508		panic("%s: lock (%s) %s is already initialized", __func__,
509		    class->lc_name, lock->lo_name);
510	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
511	    (class->lc_flags & LC_RECURSABLE) == 0)
512		panic("%s: lock (%s) %s can not be recursable", __func__,
513		    class->lc_name, lock->lo_name);
514	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
515	    (class->lc_flags & LC_SLEEPABLE) == 0)
516		panic("%s: lock (%s) %s can not be sleepable", __func__,
517		    class->lc_name, lock->lo_name);
518	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
519	    (class->lc_flags & LC_UPGRADABLE) == 0)
520		panic("%s: lock (%s) %s can not be upgradable", __func__,
521		    class->lc_name, lock->lo_name);
522
523	mtx_lock(&all_mtx);
524	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
525	lock->lo_flags |= LO_INITIALIZED;
526	lock_cur_cnt++;
527	if (lock_cur_cnt > lock_max_cnt)
528		lock_max_cnt = lock_cur_cnt;
529	mtx_unlock(&all_mtx);
530	if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
531	    (lock->lo_flags & LO_WITNESS) != 0)
532		lock->lo_witness = enroll(lock->lo_type, class);
533	else
534		lock->lo_witness = NULL;
535}
536
537void
538witness_destroy(struct lock_object *lock)
539{
540	struct witness *w;
541
542	if (witness_cold)
543		panic("lock (%s) %s destroyed while witness_cold",
544		    lock->lo_class->lc_name, lock->lo_name);
545	if ((lock->lo_flags & LO_INITIALIZED) == 0)
546		panic("%s: lock (%s) %s is not initialized", __func__,
547		    lock->lo_class->lc_name, lock->lo_name);
548
549	/* XXX: need to verify that no one holds the lock */
550	w = lock->lo_witness;
551	if (w != NULL) {
552		mtx_lock_spin(&w_mtx);
553		MPASS(w->w_refcount > 0);
554		w->w_refcount--;
555
556		/*
557		 * w_mtx has already been released if depart() fails due to an
558		 * allocation failure, so only unlock it here in the other cases.
559		 */
560		if (w->w_refcount != 0 || depart(w))
561			mtx_unlock_spin(&w_mtx);
562	}
563
564	mtx_lock(&all_mtx);
565	lock_cur_cnt--;
566	TAILQ_REMOVE(&all_locks, lock, lo_list);
567	lock->lo_flags &= ~LO_INITIALIZED;
568	mtx_unlock(&all_mtx);
569}
570
571#ifdef DDB
572static void
573witness_display_list(void(*prnt)(const char *fmt, ...),
574		     struct witness_list *list)
575{
576	struct witness *w;
577
578	STAILQ_FOREACH(w, list, w_typelist) {
579		if (w->w_file == NULL || w->w_level > 0)
580			continue;
581		/*
582		 * This lock has no ancestors; display its descendants.
583		 */
584		witness_displaydescendants(prnt, w, 0);
585	}
586}
587
588static void
589witness_display(void(*prnt)(const char *fmt, ...))
590{
591	struct witness *w;
592
593	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
594	witness_levelall();
595
596	/* Clear all the displayed flags. */
597	STAILQ_FOREACH(w, &w_all, w_list) {
598		w->w_displayed = 0;
599	}
600
601	/*
602	 * First, handle sleep locks which have been acquired at least
603	 * once.
604	 */
605	prnt("Sleep locks:\n");
606	witness_display_list(prnt, &w_sleep);
607
608	/*
609	 * Now do spin locks which have been acquired at least once.
610	 */
611	prnt("\nSpin locks:\n");
612	witness_display_list(prnt, &w_spin);
613
614	/*
615	 * Finally, any locks which have not been acquired yet.
616	 */
617	prnt("\nLocks which were never acquired:\n");
618	STAILQ_FOREACH(w, &w_all, w_list) {
619		if (w->w_file != NULL || w->w_refcount == 0)
620			continue;
621		prnt("%s\n", w->w_name);
622	}
623}
624#endif /* DDB */
625
626/* Trim useless garbage from filenames. */
627static const char *
628fixup_filename(const char *file)
629{
630
631	if (file == NULL)
632		return (NULL);
633	while (strncmp(file, "../", 3) == 0)
634		file += 3;
635	return (file);
636}
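
/*
 * For example, a build-relative path such as "../../../dev/example/example.c"
 * (a made-up name) comes out as "dev/example/example.c"; only leading "../"
 * components are stripped.
 */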
637
638int
639witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
640{
641
642	if (witness_watch == 0 || panicstr != NULL)
643		return (0);
644
645	/* Require locks that witness knows about. */
646	if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
647	    lock2->lo_witness == NULL)
648		return (EINVAL);
649
650	MPASS(!mtx_owned(&w_mtx));
651	mtx_lock_spin(&w_mtx);
652
653	/*
654	 * If we already have either an explicit or implied lock order that
655	 * is the other way around, then return an error.
656	 */
657	if (isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
658		mtx_unlock_spin(&w_mtx);
659		return (EDOOFUS);
660	}
661
662	/* Try to add the new order. */
663	CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
664	    lock2->lo_type, lock1->lo_type);
665	if (!itismychild(lock1->lo_witness, lock2->lo_witness))
666		return (ENOMEM);
667	mtx_unlock_spin(&w_mtx);
668	return (0);
669}
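
/*
 * A hedged usage sketch (the mutexes here are hypothetical): a subsystem
 * that knows its intended ordering up front can register it before the
 * locks are ever taken together, so that the first acquisition in the
 * wrong order is caught:
 *
 *	error = witness_defineorder(&parent_mtx.mtx_object,
 *	    &child_mtx.mtx_object);
 *	if (error != 0)
 *		printf("conflicting lock order already recorded\n");
 *
 * EDOOFUS is returned if the reverse order is already known, ENOMEM if the
 * witness tree has run out of space.
 */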
670
671void
672witness_checkorder(struct lock_object *lock, int flags, const char *file,
673    int line)
674{
675	struct lock_list_entry **lock_list, *lle;
676	struct lock_instance *lock1, *lock2;
677	struct lock_class *class;
678	struct witness *w, *w1;
679	struct thread *td;
680	int i, j;
681
682	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
683	    panicstr != NULL)
684		return;
685
686	/*
687	 * Try locks do not block if they fail to acquire the lock, thus
688	 * there is no danger of deadlocks or of switching while holding a
689	 * spin lock if we acquire a lock via a try operation.  This
690	 * function shouldn't even be called for try locks, so panic if
691	 * that happens.
692	 */
693	if (flags & LOP_TRYLOCK)
694		panic("%s should not be called for try lock operations",
695		    __func__);
696
697	w = lock->lo_witness;
698	class = lock->lo_class;
699	td = curthread;
700	file = fixup_filename(file);
701
702	if (class->lc_flags & LC_SLEEPLOCK) {
703		/*
704		 * Since spin locks include a critical section, this check
705		 * implicitly enforces a lock order of all sleep locks before
706		 * all spin locks.
707		 */
708		if (td->td_critnest != 0 && !kdb_active)
709			panic("blockable sleep lock (%s) %s @ %s:%d",
710			    class->lc_name, lock->lo_name, file, line);
711
712		/*
713		 * If this is the first lock acquired then just return as
714		 * no order checking is needed.
715		 */
716		if (td->td_sleeplocks == NULL)
717			return;
718		lock_list = &td->td_sleeplocks;
719	} else {
720		/*
721		 * If this is the first lock, just return as no order
722		 * checking is needed.  We check this in both if clauses
723		 * here as unifying the check would require us to use a
724		 * critical section to ensure we don't migrate while doing
725		 * the check.  Note that if this is not the first lock, we
726		 * are already in a critical section and are safe for the
727		 * rest of the check.
728		 */
729		if (PCPU_GET(spinlocks) == NULL)
730			return;
731		lock_list = PCPU_PTR(spinlocks);
732	}
733
734	/*
735	 * Check to see if we are recursing on a lock we already own.  If
736	 * so, make sure that we don't mismatch exclusive and shared lock
737	 * acquires.
738	 */
739	lock1 = find_instance(*lock_list, lock);
740	if (lock1 != NULL) {
741		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
742		    (flags & LOP_EXCLUSIVE) == 0) {
743			printf("shared lock of (%s) %s @ %s:%d\n",
744			    class->lc_name, lock->lo_name, file, line);
745			printf("while exclusively locked from %s:%d\n",
746			    lock1->li_file, lock1->li_line);
747			panic("share->excl");
748		}
749		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
750		    (flags & LOP_EXCLUSIVE) != 0) {
751			printf("exclusive lock of (%s) %s @ %s:%d\n",
752			    class->lc_name, lock->lo_name, file, line);
753			printf("while share locked from %s:%d\n",
754			    lock1->li_file, lock1->li_line);
755			panic("excl->share");
756		}
757		return;
758	}
759
760	/*
761	 * Try locks do not block if they fail to acquire the lock, thus
762	 * there is no danger of deadlocks or of switching while holding a
763	 * spin lock if we acquire a lock via a try operation.
764	 */
765	if (flags & LOP_TRYLOCK)
766		return;
767
768	/*
769	 * Check for duplicate locks of the same type.  Note that we only
770	 * have to check for this on the last lock we just acquired.  Any
771	 * other cases will be caught as lock order violations.
772	 */
773	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
774	w1 = lock1->li_lock->lo_witness;
775	if (w1 == w) {
776		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
777			return;
778		w->w_same_squawked = 1;
779		printf("acquiring duplicate lock of same type: \"%s\"\n",
780			lock->lo_type);
781		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
782		    lock1->li_file, lock1->li_line);
783		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
784#ifdef KDB
785		goto debugger;
786#else
787		return;
788#endif
789	}
790	MPASS(!mtx_owned(&w_mtx));
791	mtx_lock_spin(&w_mtx);
792	/*
793	 * If the lock we are acquiring has a known higher level, just say ok.
794	 */
795	if (witness_watch > 1 && w->w_level > w1->w_level) {
796		mtx_unlock_spin(&w_mtx);
797		return;
798	}
799	/*
800	 * If we know that the lock we are acquiring comes after
801	 * the lock we most recently acquired in the lock order tree,
802	 * then there is no need for any further checks.
803	 */
804	if (isitmydescendant(w1, w)) {
805		mtx_unlock_spin(&w_mtx);
806		return;
807	}
808	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
809		for (i = lle->ll_count - 1; i >= 0; i--, j++) {
810
811			MPASS(j < WITNESS_COUNT);
812			lock1 = &lle->ll_children[i];
813			w1 = lock1->li_lock->lo_witness;
814
815			/*
816			 * If this lock doesn't undergo witness checking,
817			 * then skip it.
818			 */
819			if (w1 == NULL) {
820				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
821				    ("lock missing witness structure"));
822				continue;
823			}
824			/*
825			 * If we are locking Giant and this is a sleepable
826			 * lock, then skip it.
827			 */
828			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
829			    lock == &Giant.mtx_object)
830				continue;
831			/*
832			 * If we are locking a sleepable lock and this lock
833			 * is Giant, then skip it.
834			 */
835			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
836			    lock1->li_lock == &Giant.mtx_object)
837				continue;
838			/*
839			 * If we are locking a sleepable lock and this lock
840			 * isn't sleepable, we want to treat it as a lock
841			 * order violation to enforce a general lock order of
842			 * sleepable locks before non-sleepable locks.
843			 */
844			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
845			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
846			    /*
847			     * Check the lock order hierarchy for a reversal.
848			     */
849			    if (!isitmydescendant(w, w1))
850				continue;
851			/*
852			 * We have a lock order violation, check to see if it
853			 * is allowed or has already been yelled about.
854			 */
855			mtx_unlock_spin(&w_mtx);
856#ifdef BLESSING
857			/*
858			 * If the lock order is blessed, just bail.  We don't
859			 * look for other lock order violations though, which
860			 * may be a bug.
861			 */
862			if (blessed(w, w1))
863				return;
864#endif
865			if (lock1->li_lock == &Giant.mtx_object) {
866				if (w1->w_Giant_squawked)
867					return;
868				else
869					w1->w_Giant_squawked = 1;
870			} else {
871				if (w1->w_other_squawked)
872					return;
873				else
874					w1->w_other_squawked = 1;
875			}
876			/*
877			 * Ok, yell about it.
878			 */
879			printf("lock order reversal\n");
880			/*
881			 * Try to locate an earlier lock with
882			 * witness w in our list.
883			 */
884			do {
885				lock2 = &lle->ll_children[i];
886				MPASS(lock2->li_lock != NULL);
887				if (lock2->li_lock->lo_witness == w)
888					break;
889				if (i == 0 && lle->ll_next != NULL) {
890					lle = lle->ll_next;
891					i = lle->ll_count - 1;
892					MPASS(i >= 0 && i < LOCK_NCHILDREN);
893				} else
894					i--;
895			} while (i >= 0);
896			if (i < 0) {
897				printf(" 1st %p %s (%s) @ %s:%d\n",
898				    lock1->li_lock, lock1->li_lock->lo_name,
899				    lock1->li_lock->lo_type, lock1->li_file,
900				    lock1->li_line);
901				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
902				    lock->lo_name, lock->lo_type, file, line);
903			} else {
904				printf(" 1st %p %s (%s) @ %s:%d\n",
905				    lock2->li_lock, lock2->li_lock->lo_name,
906				    lock2->li_lock->lo_type, lock2->li_file,
907				    lock2->li_line);
908				printf(" 2nd %p %s (%s) @ %s:%d\n",
909				    lock1->li_lock, lock1->li_lock->lo_name,
910				    lock1->li_lock->lo_type, lock1->li_file,
911				    lock1->li_line);
912				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
913				    lock->lo_name, lock->lo_type, file, line);
914			}
915#ifdef KDB
916			goto debugger;
917#else
918			return;
919#endif
920		}
921	}
922	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
923	/*
924	 * If requested, build a new lock order.  However, don't build a new
925	 * relationship between a sleepable lock and Giant if it is in the
926	 * wrong direction.  The correct lock order is that sleepable locks
927	 * always come before Giant.
928	 */
929	if (flags & LOP_NEWORDER &&
930	    !(lock1->li_lock == &Giant.mtx_object &&
931	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
932		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
933		    lock->lo_type, lock1->li_lock->lo_type);
934		if (!itismychild(lock1->li_lock->lo_witness, w))
935			/* Witness is dead. */
936			return;
937	}
938	mtx_unlock_spin(&w_mtx);
939	return;
940
941#ifdef KDB
942debugger:
943	if (witness_trace)
944		kdb_backtrace();
945	if (witness_kdb)
946		kdb_enter(__func__);
947#endif
948}
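
/*
 * A minimal sketch of what the function above catches (the lock names are
 * hypothetical): if some thread has at any point done
 *
 *	mtx_lock(&lock_a);
 *	mtx_lock(&lock_b);
 *
 * then a later
 *
 *	mtx_lock(&lock_b);
 *	mtx_lock(&lock_a);
 *
 * from any thread is reported as a lock order reversal, because lock_a's
 * witness is already recorded as an ancestor of lock_b's in the order tree.
 */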
949
950void
951witness_lock(struct lock_object *lock, int flags, const char *file, int line)
952{
953	struct lock_list_entry **lock_list, *lle;
954	struct lock_instance *instance;
955	struct witness *w;
956	struct thread *td;
957
958	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
959	    panicstr != NULL)
960		return;
961	w = lock->lo_witness;
962	td = curthread;
963	file = fixup_filename(file);
964
965	/* Determine lock list for this lock. */
966	if (lock->lo_class->lc_flags & LC_SLEEPLOCK)
967		lock_list = &td->td_sleeplocks;
968	else
969		lock_list = PCPU_PTR(spinlocks);
970
971	/* Check to see if we are recursing on a lock we already own. */
972	instance = find_instance(*lock_list, lock);
973	if (instance != NULL) {
974		instance->li_flags++;
975		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
976		    td->td_proc->p_pid, lock->lo_name,
977		    instance->li_flags & LI_RECURSEMASK);
978		instance->li_file = file;
979		instance->li_line = line;
980		return;
981	}
982
983	/* Update per-witness last file and line acquire. */
984	w->w_file = file;
985	w->w_line = line;
986
987	/* Find the next open lock instance in the list and fill it. */
988	lle = *lock_list;
989	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
990		lle = witness_lock_list_get();
991		if (lle == NULL)
992			return;
993		lle->ll_next = *lock_list;
994		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
995		    td->td_proc->p_pid, lle);
996		*lock_list = lle;
997	}
998	instance = &lle->ll_children[lle->ll_count++];
999	instance->li_lock = lock;
1000	instance->li_line = line;
1001	instance->li_file = file;
1002	if ((flags & LOP_EXCLUSIVE) != 0)
1003		instance->li_flags = LI_EXCLUSIVE;
1004	else
1005		instance->li_flags = 0;
1006	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1007	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1008}
1009
1010void
1011witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1012{
1013	struct lock_instance *instance;
1014	struct lock_class *class;
1015
1016	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1017	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1018		return;
1019	class = lock->lo_class;
1020	file = fixup_filename(file);
1021	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1022		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1023		    class->lc_name, lock->lo_name, file, line);
1024	if ((flags & LOP_TRYLOCK) == 0)
1025		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
1026		    lock->lo_name, file, line);
1027	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1028		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1029		    class->lc_name, lock->lo_name, file, line);
1030	instance = find_instance(curthread->td_sleeplocks, lock);
1031	if (instance == NULL)
1032		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1033		    class->lc_name, lock->lo_name, file, line);
1034	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1035		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1036		    class->lc_name, lock->lo_name, file, line);
1037	if ((instance->li_flags & LI_RECURSEMASK) != 0)
1038		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1039		    class->lc_name, lock->lo_name,
1040		    instance->li_flags & LI_RECURSEMASK, file, line);
1041	instance->li_flags |= LI_EXCLUSIVE;
1042}
1043
1044void
1045witness_downgrade(struct lock_object *lock, int flags, const char *file,
1046    int line)
1047{
1048	struct lock_instance *instance;
1049	struct lock_class *class;
1050
1051	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1052	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1053		return;
1054	class = lock->lo_class;
1055	file = fixup_filename(file);
1056	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1057		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1058		    class->lc_name, lock->lo_name, file, line);
1059	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1060		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1061		    class->lc_name, lock->lo_name, file, line);
1062	instance = find_instance(curthread->td_sleeplocks, lock);
1063	if (instance == NULL)
1064		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1065		    class->lc_name, lock->lo_name, file, line);
1066	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1067		panic("downgrade of shared lock (%s) %s @ %s:%d",
1068		    class->lc_name, lock->lo_name, file, line);
1069	if ((instance->li_flags & LI_RECURSEMASK) != 0)
1070		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1071		    class->lc_name, lock->lo_name,
1072		    instance->li_flags & LI_RECURSEMASK, file, line);
1073	instance->li_flags &= ~LI_EXCLUSIVE;
1074}
1075
1076void
1077witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1078{
1079	struct lock_list_entry **lock_list, *lle;
1080	struct lock_instance *instance;
1081	struct lock_class *class;
1082	struct thread *td;
1083	register_t s;
1084	int i, j;
1085
1086	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
1087	    panicstr != NULL)
1088		return;
1089	td = curthread;
1090	class = lock->lo_class;
1091	file = fixup_filename(file);
1092
1093	/* Find lock instance associated with this lock. */
1094	if (class->lc_flags & LC_SLEEPLOCK)
1095		lock_list = &td->td_sleeplocks;
1096	else
1097		lock_list = PCPU_PTR(spinlocks);
1098	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1099		for (i = 0; i < (*lock_list)->ll_count; i++) {
1100			instance = &(*lock_list)->ll_children[i];
1101			if (instance->li_lock == lock)
1102				goto found;
1103		}
1104	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
1105	    file, line);
1106found:
1107
1108	/* First, check for shared/exclusive mismatches. */
1109	if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
1110	    (flags & LOP_EXCLUSIVE) == 0) {
1111		printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1112		    lock->lo_name, file, line);
1113		printf("while exclusively locked from %s:%d\n",
1114		    instance->li_file, instance->li_line);
1115		panic("excl->ushare");
1116	}
1117	if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
1118	    (flags & LOP_EXCLUSIVE) != 0) {
1119		printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1120		    lock->lo_name, file, line);
1121		printf("while share locked from %s:%d\n", instance->li_file,
1122		    instance->li_line);
1123		panic("share->uexcl");
1124	}
1125
1126	/* If we are recursed, unrecurse. */
1127	if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1128		CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1129		    td->td_proc->p_pid, instance->li_lock->lo_name,
1130		    instance->li_flags);
1131		instance->li_flags--;
1132		return;
1133	}
1134
1135	/* Otherwise, remove this item from the list. */
1136	s = intr_disable();
1137	CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1138	    td->td_proc->p_pid, instance->li_lock->lo_name,
1139	    (*lock_list)->ll_count - 1);
1140	for (j = i; j < (*lock_list)->ll_count - 1; j++)
1141		(*lock_list)->ll_children[j] =
1142		    (*lock_list)->ll_children[j + 1];
1143	(*lock_list)->ll_count--;
1144	intr_restore(s);
1145
1146	/* If this lock list entry is now empty, free it. */
1147	if ((*lock_list)->ll_count == 0) {
1148		lle = *lock_list;
1149		*lock_list = lle->ll_next;
1150		CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1151		    td->td_proc->p_pid, lle);
1152		witness_lock_list_free(lle);
1153	}
1154}
1155
1156/*
1157 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
1158 * exempt Giant and sleepable locks from the checks as well.  If any
1159 * non-exempt locks are held, then a supplied message is printed to the
1160 * console along with a list of the offending locks.  If indicated in the
1161 * flags then a failure results in a panic as well.
1162 */
1163int
1164witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1165{
1166	struct lock_list_entry *lle;
1167	struct lock_instance *lock1;
1168	struct thread *td;
1169	va_list ap;
1170	int i, n;
1171
1172	if (witness_cold || witness_watch == 0 || panicstr != NULL)
1173		return (0);
1174	n = 0;
1175	td = curthread;
1176	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1177		for (i = lle->ll_count - 1; i >= 0; i--) {
1178			lock1 = &lle->ll_children[i];
1179			if (lock1->li_lock == lock)
1180				continue;
1181			if (flags & WARN_GIANTOK &&
1182			    lock1->li_lock == &Giant.mtx_object)
1183				continue;
1184			if (flags & WARN_SLEEPOK &&
1185			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1186				continue;
1187			if (n == 0) {
1188				va_start(ap, fmt);
1189				vprintf(fmt, ap);
1190				va_end(ap);
1191				printf(" with the following");
1192				if (flags & WARN_SLEEPOK)
1193					printf(" non-sleepable");
1194				printf(" locks held:\n");
1195			}
1196			n++;
1197			witness_list_lock(lock1);
1198		}
1199	if (PCPU_GET(spinlocks) != NULL) {
1200		/*
1201		 * Since we already hold a spin lock, preemption is
1202		 * already blocked.
1203		 */
1204		if (n == 0) {
1205			va_start(ap, fmt);
1206			vprintf(fmt, ap);
1207			va_end(ap);
1208			printf(" with the following");
1209			if (flags & WARN_SLEEPOK)
1210				printf(" non-sleepable");
1211			printf(" locks held:\n");
1212		}
1213		n += witness_list_locks(PCPU_PTR(spinlocks));
1214	}
1215	if (flags & WARN_PANIC && n)
1216		panic("witness_warn");
1217#ifdef KDB
1218	else if (witness_kdb && n)
1219		kdb_enter(__func__);
1220	else if (witness_trace && n)
1221		kdb_backtrace();
1222#endif
1223	return (n);
1224}
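
/*
 * A hedged usage sketch (the lock and message are hypothetical): warn, and
 * optionally panic, if anything other than the interlock is held at a
 * point where the thread is about to sleep, while tolerating Giant and
 * sleepable locks:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC,
 *	    &example_mtx.mtx_object, "sleeping on \"%s\"", wmesg);
 */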
1225
1226const char *
1227witness_file(struct lock_object *lock)
1228{
1229	struct witness *w;
1230
1231	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1232		return ("?");
1233	w = lock->lo_witness;
1234	return (w->w_file);
1235}
1236
1237int
1238witness_line(struct lock_object *lock)
1239{
1240	struct witness *w;
1241
1242	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1243		return (0);
1244	w = lock->lo_witness;
1245	return (w->w_line);
1246}
1247
1248static struct witness *
1249enroll(const char *description, struct lock_class *lock_class)
1250{
1251	struct witness *w;
1252
1253	if (witness_watch == 0 || panicstr != NULL)
1254		return (NULL);
1255	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
1256		return (NULL);
1257	mtx_lock_spin(&w_mtx);
1258	STAILQ_FOREACH(w, &w_all, w_list) {
1259		if (w->w_name == description || (w->w_refcount > 0 &&
1260		    strcmp(description, w->w_name) == 0)) {
1261			w->w_refcount++;
1262			mtx_unlock_spin(&w_mtx);
1263			if (lock_class != w->w_class)
1264				panic(
1265				"lock (%s) %s does not match earlier (%s) lock",
1266				    description, lock_class->lc_name,
1267				    w->w_class->lc_name);
1268			return (w);
1269		}
1270	}
1271	/*
1272	 * This isn't quite right, as witness_cold is still non-zero while we
1273	 * enroll all the locks initialized before witness_initialize().
1274	 */
1275	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
1276		mtx_unlock_spin(&w_mtx);
1277		panic("spin lock %s not in order list", description);
1278	}
1279	if ((w = witness_get()) == NULL)
1280		return (NULL);
1281	w->w_name = description;
1282	w->w_class = lock_class;
1283	w->w_refcount = 1;
1284	STAILQ_INSERT_HEAD(&w_all, w, w_list);
1285	if (lock_class->lc_flags & LC_SPINLOCK)
1286		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1287	else if (lock_class->lc_flags & LC_SLEEPLOCK)
1288		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1289	else {
1290		mtx_unlock_spin(&w_mtx);
1291		panic("lock class %s is not sleep or spin",
1292		    lock_class->lc_name);
1293	}
1294	mtx_unlock_spin(&w_mtx);
1295	return (w);
1296}
1297
1298/* Don't let the door bang you on the way out... */
1299static int
1300depart(struct witness *w)
1301{
1302	struct witness_child_list_entry *wcl, *nwcl;
1303	struct witness_list *list;
1304	struct witness *parent;
1305
1306	MPASS(w->w_refcount == 0);
1307	if (w->w_class->lc_flags & LC_SLEEPLOCK)
1308		list = &w_sleep;
1309	else
1310		list = &w_spin;
1311	/*
1312	 * First, we run through the entire tree looking for any
1313	 * witnesses that the outgoing witness is a child of.  For
1314	 * each parent that we find, we reparent all the direct
1315	 * children of the outgoing witness to its parent.
1316	 */
1317	STAILQ_FOREACH(parent, list, w_typelist) {
1318		if (!isitmychild(parent, w))
1319			continue;
1320		removechild(parent, w);
1321		if (!reparentchildren(parent, w))
1322			return (0);
1323	}
1324
1325	/*
1326	 * Now we go through and free up the child list of the
1327	 * outgoing witness.
1328	 */
1329	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
1330		nwcl = wcl->wcl_next;
1331		witness_child_free(wcl);
1332	}
1333
1334	/*
1335	 * Detach from various lists and free.
1336	 */
1337	STAILQ_REMOVE(list, w, witness, w_typelist);
1338	STAILQ_REMOVE(&w_all, w, witness, w_list);
1339	witness_free(w);
1340
1341	/* Finally, fixup the tree. */
1342	return (rebalancetree(list));
1343}
1344
1345/*
1346 * Prune an entire lock order tree.  We look for cases where a lock
1347 * is now both a descendant and a direct child of a given lock.  In
1348 * that case, we want to remove the direct child link from the tree.
1349 *
1350 * Returns false if insertchild() fails.
1351 */
1352static int
1353rebalancetree(struct witness_list *list)
1354{
1355	struct witness *child, *parent;
1356
1357	STAILQ_FOREACH(child, list, w_typelist) {
1358		STAILQ_FOREACH(parent, list, w_typelist) {
1359			if (!isitmychild(parent, child))
1360				continue;
1361			removechild(parent, child);
1362			if (isitmydescendant(parent, child))
1363				continue;
1364			if (!insertchild(parent, child))
1365				return (0);
1366		}
1367	}
1368	witness_levelall();
1369	return (1);
1370}
1371
1372/*
1373 * Add "child" as a direct child of "parent".  Returns false if
1374 * we fail due to lack of memory.
1375 */
1376static int
1377insertchild(struct witness *parent, struct witness *child)
1378{
1379	struct witness_child_list_entry **wcl;
1380
1381	MPASS(child != NULL && parent != NULL);
1382
1383	/*
1384	 * Insert "child" after "parent"
1385	 */
1386	wcl = &parent->w_children;
1387	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
1388		wcl = &(*wcl)->wcl_next;
1389	if (*wcl == NULL) {
1390		*wcl = witness_child_get();
1391		if (*wcl == NULL)
1392			return (0);
1393	}
1394	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
1395
1396	return (1);
1397}
1398
1399/*
1400 * Make all the direct descendants of oldparent be direct descendants
1401 * of newparent.
1402 */
1403static int
1404reparentchildren(struct witness *newparent, struct witness *oldparent)
1405{
1406	struct witness_child_list_entry *wcl;
1407	int i;
1408
1409	/* Avoid making a witness a child of itself. */
1410	MPASS(!isitmychild(oldparent, newparent));
1411
1412	for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1413		for (i = 0; i < wcl->wcl_count; i++)
1414			if (!insertchild(newparent, wcl->wcl_children[i]))
1415				return (0);
1416	return (1);
1417}
1418
1419static int
1420itismychild(struct witness *parent, struct witness *child)
1421{
1422	struct witness_list *list;
1423
1424	MPASS(child != NULL && parent != NULL);
1425	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
1426	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
1427		panic(
1428		"%s: parent (%s) and child (%s) are not the same lock type",
1429		    __func__, parent->w_class->lc_name,
1430		    child->w_class->lc_name);
1431
1432	if (!insertchild(parent, child))
1433		return (0);
1434
1435	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
1436		list = &w_sleep;
1437	else
1438		list = &w_spin;
1439	return (rebalancetree(list));
1440}
1441
1442static void
1443removechild(struct witness *parent, struct witness *child)
1444{
1445	struct witness_child_list_entry **wcl, *wcl1;
1446	int i;
1447
1448	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1449		for (i = 0; i < (*wcl)->wcl_count; i++)
1450			if ((*wcl)->wcl_children[i] == child)
1451				goto found;
1452	return;
1453found:
1454	(*wcl)->wcl_count--;
1455	if ((*wcl)->wcl_count > i)
1456		(*wcl)->wcl_children[i] =
1457		    (*wcl)->wcl_children[(*wcl)->wcl_count];
1458	MPASS((*wcl)->wcl_children[i] != NULL);
1459	if ((*wcl)->wcl_count != 0)
1460		return;
1461	wcl1 = *wcl;
1462	*wcl = wcl1->wcl_next;
1463	witness_child_free(wcl1);
1464}
1465
1466static int
1467isitmychild(struct witness *parent, struct witness *child)
1468{
1469	struct witness_child_list_entry *wcl;
1470	int i;
1471
1472	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1473		for (i = 0; i < wcl->wcl_count; i++) {
1474			if (wcl->wcl_children[i] == child)
1475				return (1);
1476		}
1477	}
1478	return (0);
1479}
1480
1481static int
1482isitmydescendant(struct witness *parent, struct witness *child)
1483{
1484	struct witness_child_list_entry *wcl;
1485	int i, j;
1486
1487	if (isitmychild(parent, child))
1488		return (1);
1489	j = 0;
1490	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1491		MPASS(j < 1000);
1492		for (i = 0; i < wcl->wcl_count; i++) {
1493			if (isitmydescendant(wcl->wcl_children[i], child))
1494				return (1);
1495		}
1496		j++;
1497	}
1498	return (0);
1499}
1500
1501static void
1502witness_levelall (void)
1503{
1504	struct witness_list *list;
1505	struct witness *w, *w1;
1506
1507	/*
1508	 * First clear all levels.
1509	 */
1510	STAILQ_FOREACH(w, &w_all, w_list) {
1511		w->w_level = 0;
1512	}
1513
1514	/*
1515	 * Look for locks with no parent and level all their descendants.
1516	 */
1517	STAILQ_FOREACH(w, &w_all, w_list) {
1518		/*
1519		 * This is just an optimization, technically we could get
1520		 * away with just walking the all list each time.
1521		 */
1522		if (w->w_class->lc_flags & LC_SLEEPLOCK)
1523			list = &w_sleep;
1524		else
1525			list = &w_spin;
1526		STAILQ_FOREACH(w1, list, w_typelist) {
1527			if (isitmychild(w1, w))
1528				goto skip;
1529		}
1530		witness_leveldescendents(w, 0);
1531	skip:
1532		;	/* silence GCC 3.x */
1533	}
1534}
1535
1536static void
1537witness_leveldescendents(struct witness *parent, int level)
1538{
1539	struct witness_child_list_entry *wcl;
1540	int i;
1541
1542	if (parent->w_level < level)
1543		parent->w_level = level;
1544	level++;
1545	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1546		for (i = 0; i < wcl->wcl_count; i++)
1547			witness_leveldescendents(wcl->wcl_children[i], level);
1548}
1549
1550static void
1551witness_displaydescendants(void(*prnt)(const char *fmt, ...),
1552			   struct witness *parent, int indent)
1553{
1554	struct witness_child_list_entry *wcl;
1555	int i, level;
1556
1557	level = parent->w_level;
1558	prnt("%-2d", level);
1559	for (i = 0; i < indent; i++)
1560		prnt(" ");
1561	if (parent->w_refcount > 0)
1562		prnt("%s", parent->w_name);
1563	else
1564		prnt("(dead)");
1565	if (parent->w_displayed) {
1566		prnt(" -- (already displayed)\n");
1567		return;
1568	}
1569	parent->w_displayed = 1;
1570	if (parent->w_refcount > 0) {
1571		if (parent->w_file != NULL)
1572			prnt(" -- last acquired @ %s:%d", parent->w_file,
1573			    parent->w_line);
1574	}
1575	prnt("\n");
1576	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1577		for (i = 0; i < wcl->wcl_count; i++)
1578			    witness_displaydescendants(prnt,
1579				wcl->wcl_children[i], indent + 1);
1580}
1581
1582#ifdef BLESSING
1583static int
1584blessed(struct witness *w1, struct witness *w2)
1585{
1586	int i;
1587	struct witness_blessed *b;
1588
1589	for (i = 0; i < blessed_count; i++) {
1590		b = &blessed_list[i];
1591		if (strcmp(w1->w_name, b->b_lock1) == 0) {
1592			if (strcmp(w2->w_name, b->b_lock2) == 0)
1593				return (1);
1594			continue;
1595		}
1596		if (strcmp(w1->w_name, b->b_lock2) == 0)
1597			if (strcmp(w2->w_name, b->b_lock1) == 0)
1598				return (1);
1599	}
1600	return (0);
1601}
1602#endif
1603
1604static struct witness *
1605witness_get(void)
1606{
1607	struct witness *w;
1608
1609	if (witness_watch == 0) {
1610		mtx_unlock_spin(&w_mtx);
1611		return (NULL);
1612	}
1613	if (STAILQ_EMPTY(&w_free)) {
1614		witness_watch = 0;
1615		mtx_unlock_spin(&w_mtx);
1616		printf("%s: witness exhausted\n", __func__);
1617		return (NULL);
1618	}
1619	w = STAILQ_FIRST(&w_free);
1620	STAILQ_REMOVE_HEAD(&w_free, w_list);
1621	bzero(w, sizeof(*w));
1622	return (w);
1623}
1624
1625static void
1626witness_free(struct witness *w)
1627{
1628
1629	STAILQ_INSERT_HEAD(&w_free, w, w_list);
1630}
1631
1632static struct witness_child_list_entry *
1633witness_child_get(void)
1634{
1635	struct witness_child_list_entry *wcl;
1636
1637	if (witness_watch == 0) {
1638		mtx_unlock_spin(&w_mtx);
1639		return (NULL);
1640	}
1641	wcl = w_child_free;
1642	if (wcl == NULL) {
1643		witness_watch = 0;
1644		mtx_unlock_spin(&w_mtx);
1645		printf("%s: witness exhausted\n", __func__);
1646		return (NULL);
1647	}
1648	w_child_free = wcl->wcl_next;
1649	bzero(wcl, sizeof(*wcl));
1650	return (wcl);
1651}
1652
1653static void
1654witness_child_free(struct witness_child_list_entry *wcl)
1655{
1656
1657	wcl->wcl_next = w_child_free;
1658	w_child_free = wcl;
1659}
1660
1661static struct lock_list_entry *
1662witness_lock_list_get(void)
1663{
1664	struct lock_list_entry *lle;
1665
1666	if (witness_watch == 0)
1667		return (NULL);
1668	mtx_lock_spin(&w_mtx);
1669	lle = w_lock_list_free;
1670	if (lle == NULL) {
1671		witness_watch = 0;
1672		mtx_unlock_spin(&w_mtx);
1673		printf("%s: witness exhausted\n", __func__);
1674		return (NULL);
1675	}
1676	w_lock_list_free = lle->ll_next;
1677	mtx_unlock_spin(&w_mtx);
1678	bzero(lle, sizeof(*lle));
1679	return (lle);
1680}
1681
1682static void
1683witness_lock_list_free(struct lock_list_entry *lle)
1684{
1685
1686	mtx_lock_spin(&w_mtx);
1687	lle->ll_next = w_lock_list_free;
1688	w_lock_list_free = lle;
1689	mtx_unlock_spin(&w_mtx);
1690}
1691
1692static struct lock_instance *
1693find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1694{
1695	struct lock_list_entry *lle;
1696	struct lock_instance *instance;
1697	int i;
1698
1699	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1700		for (i = lle->ll_count - 1; i >= 0; i--) {
1701			instance = &lle->ll_children[i];
1702			if (instance->li_lock == lock)
1703				return (instance);
1704		}
1705	return (NULL);
1706}
1707
1708static void
1709witness_list_lock(struct lock_instance *instance)
1710{
1711	struct lock_object *lock;
1712
1713	lock = instance->li_lock;
1714	printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1715	    "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
1716	if (lock->lo_type != lock->lo_name)
1717		printf(" (%s)", lock->lo_type);
1718	printf(" r = %d (%p) locked @ %s:%d\n",
1719	    instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1720	    instance->li_line);
1721}
1722
1723#ifdef DDB
1724static int
1725witness_thread_has_locks(struct thread *td)
1726{
1727
1728	return (td->td_sleeplocks != NULL);
1729}
1730
1731static int
1732witness_proc_has_locks(struct proc *p)
1733{
1734	struct thread *td;
1735
1736	FOREACH_THREAD_IN_PROC(p, td) {
1737		if (witness_thread_has_locks(td))
1738			return (1);
1739	}
1740	return (0);
1741}
1742#endif
1743
1744int
1745witness_list_locks(struct lock_list_entry **lock_list)
1746{
1747	struct lock_list_entry *lle;
1748	int i, nheld;
1749
1750	nheld = 0;
1751	for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1752		for (i = lle->ll_count - 1; i >= 0; i--) {
1753			witness_list_lock(&lle->ll_children[i]);
1754			nheld++;
1755		}
1756	return (nheld);
1757}
1758
1759/*
1760 * This is a bit risky at best.  We call this function when we have timed
1761 * out acquiring a spin lock, and we assume that the other CPU is stuck
1762 * with this lock held.  So, we go groveling around in the other CPU's
1763 * per-cpu data to try to find the lock instance for this spin lock to
1764 * see when it was last acquired.
1765 */
1766void
1767witness_display_spinlock(struct lock_object *lock, struct thread *owner)
1768{
1769	struct lock_instance *instance;
1770	struct pcpu *pc;
1771
1772	if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
1773		return;
1774	pc = pcpu_find(owner->td_oncpu);
1775	instance = find_instance(pc->pc_spinlocks, lock);
1776	if (instance != NULL)
1777		witness_list_lock(instance);
1778}
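
/*
 * Hypothetical caller sketch (the real callers live in the spin lock
 * acquisition code, not in this file): once a spin lock is judged to be
 * stuck, report where its owner last acquired it:
 *
 *	printf("spin lock %s held by %p for too long\n",
 *	    m->mtx_object.lo_name, owner);
 *	witness_display_spinlock(&m->mtx_object, owner);
 */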
1779
1780void
1781witness_save(struct lock_object *lock, const char **filep, int *linep)
1782{
1783	struct lock_instance *instance;
1784
1785	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1786	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1787		return;
1788	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1789		panic("%s: lock (%s) %s is not a sleep lock", __func__,
1790		    lock->lo_class->lc_name, lock->lo_name);
1791	instance = find_instance(curthread->td_sleeplocks, lock);
1792	if (instance == NULL)
1793		panic("%s: lock (%s) %s not locked", __func__,
1794		    lock->lo_class->lc_name, lock->lo_name);
1795	*filep = instance->li_file;
1796	*linep = instance->li_line;
1797}
1798
1799void
1800witness_restore(struct lock_object *lock, const char *file, int line)
1801{
1802	struct lock_instance *instance;
1803
1804	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1805	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1806		return;
1807	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1808		panic("%s: lock (%s) %s is not a sleep lock", __func__,
1809		    lock->lo_class->lc_name, lock->lo_name);
1810	instance = find_instance(curthread->td_sleeplocks, lock);
1811	if (instance == NULL)
1812		panic("%s: lock (%s) %s not locked", __func__,
1813		    lock->lo_class->lc_name, lock->lo_name);
1814	lock->lo_witness->w_file = file;
1815	lock->lo_witness->w_line = line;
1816	instance->li_file = file;
1817	instance->li_line = line;
1818}
1819
1820void
1821witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1822{
1823#ifdef INVARIANT_SUPPORT
1824	struct lock_instance *instance;
1825
1826	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1827		return;
1828	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
1829		instance = find_instance(curthread->td_sleeplocks, lock);
1830	else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
1831		instance = find_instance(PCPU_GET(spinlocks), lock);
1832	else {
1833		panic("Lock (%s) %s is not sleep or spin!",
1834		    lock->lo_class->lc_name, lock->lo_name);
1835	}
1836	file = fixup_filename(file);
1837	switch (flags) {
1838	case LA_UNLOCKED:
1839		if (instance != NULL)
1840			panic("Lock (%s) %s locked @ %s:%d.",
1841			    lock->lo_class->lc_name, lock->lo_name, file, line);
1842		break;
1843	case LA_LOCKED:
1844	case LA_LOCKED | LA_RECURSED:
1845	case LA_LOCKED | LA_NOTRECURSED:
1846	case LA_SLOCKED:
1847	case LA_SLOCKED | LA_RECURSED:
1848	case LA_SLOCKED | LA_NOTRECURSED:
1849	case LA_XLOCKED:
1850	case LA_XLOCKED | LA_RECURSED:
1851	case LA_XLOCKED | LA_NOTRECURSED:
1852		if (instance == NULL) {
1853			panic("Lock (%s) %s not locked @ %s:%d.",
1854			    lock->lo_class->lc_name, lock->lo_name, file, line);
1855			break;
1856		}
1857		if ((flags & LA_XLOCKED) != 0 &&
1858		    (instance->li_flags & LI_EXCLUSIVE) == 0)
1859			panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1860			    lock->lo_class->lc_name, lock->lo_name, file, line);
1861		if ((flags & LA_SLOCKED) != 0 &&
1862		    (instance->li_flags & LI_EXCLUSIVE) != 0)
1863			panic("Lock (%s) %s exclusively locked @ %s:%d.",
1864			    lock->lo_class->lc_name, lock->lo_name, file, line);
1865		if ((flags & LA_RECURSED) != 0 &&
1866		    (instance->li_flags & LI_RECURSEMASK) == 0)
1867			panic("Lock (%s) %s not recursed @ %s:%d.",
1868			    lock->lo_class->lc_name, lock->lo_name, file, line);
1869		if ((flags & LA_NOTRECURSED) != 0 &&
1870		    (instance->li_flags & LI_RECURSEMASK) != 0)
1871			panic("Lock (%s) %s recursed @ %s:%d.",
1872			    lock->lo_class->lc_name, lock->lo_name, file, line);
1873		break;
1874	default:
1875		panic("Invalid lock assertion at %s:%d.", file, line);
1876
1877	}
1878#endif	/* INVARIANT_SUPPORT */
1879}
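
/*
 * Illustrative use (the mutex name is hypothetical): assert that the
 * current thread holds example_mtx exclusively, panicking otherwise when
 * witness is active:
 *
 *	witness_assert(&example_mtx.mtx_object, LA_XLOCKED, __FILE__, __LINE__);
 */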
1880
1881#ifdef DDB
1882static void
1883witness_list(struct thread *td)
1884{
1885
1886	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1887	KASSERT(kdb_active, ("%s: not in the debugger", __func__));
1888
1889	if (witness_watch == 0)
1890		return;
1891
1892	witness_list_locks(&td->td_sleeplocks);
1893
1894	/*
1895	 * We only handle spinlocks if td == curthread.  This is somewhat broken
1896	 * if td is currently executing on some other CPU and holds spin locks,
1897	 * as we won't display those locks.  If we had an MI way of getting
1898	 * the per-cpu data for a given cpu then we could use
1899	 * td->td_oncpu to get the list of spinlocks for this thread
1900	 * and "fix" this.
1901	 *
1902	 * That still wouldn't really fix this unless we locked sched_lock
1903	 * or stopped the other CPU to make sure it wasn't changing the list
1904	 * out from under us.  It is probably best to just not try to handle
1905	 * threads on other CPUs for now.
1906	 */
1907	if (td == curthread && PCPU_GET(spinlocks) != NULL)
1908		witness_list_locks(PCPU_PTR(spinlocks));
1909}
1910
1911DB_SHOW_COMMAND(locks, db_witness_list)
1912{
1913	struct thread *td;
1914	pid_t pid;
1915	struct proc *p;
1916
1917	if (have_addr) {
1918		pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
1919		    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
1920		    ((addr >> 16) % 16) * 10000;
1921		/* sx_slock(&allproc_lock); */
1922		FOREACH_PROC_IN_SYSTEM(p) {
1923			if (p->p_pid == pid)
1924				break;
1925		}
1926		/* sx_sunlock(&allproc_lock); */
1927		if (p == NULL) {
1928			db_printf("pid %d not found\n", pid);
1929			return;
1930		}
1931		FOREACH_THREAD_IN_PROC(p, td) {
1932			witness_list(td);
1933		}
1934	} else {
1935		td = curthread;
1936		witness_list(td);
1937	}
1938}
1939
1940DB_SHOW_COMMAND(alllocks, db_witness_list_all)
1941{
1942	struct thread *td;
1943	struct proc *p;
1944
1945	/*
1946	 * It would be nice to list only threads and processes that actually
1947	 * held sleep locks, but that information is currently not exported
1948	 * by WITNESS.
1949	 */
1950	FOREACH_PROC_IN_SYSTEM(p) {
1951		if (!witness_proc_has_locks(p))
1952			continue;
1953		FOREACH_THREAD_IN_PROC(p, td) {
1954			if (!witness_thread_has_locks(td))
1955				continue;
1956			printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
1957			    p->p_comm, td, td->td_tid);
1958			witness_list(td);
1959		}
1960	}
1961}
1962
1963DB_SHOW_COMMAND(witness, db_witness_display)
1964{
1965
1966	witness_display(db_printf);
1967}
1968#endif
1969