subr_witness.c: r189544 -> r191672
1/*-
2 * Copyright (c) 2008 Isilon Systems, Inc.
3 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4 * Copyright (c) 1998 Berkeley Software Design, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Berkeley Software Design Inc's name may not be used to endorse or
16 * promote products derived from this software without specific prior
17 * written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
32 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
33 */
34
35/*
36 * Implementation of the `witness' lock verifier. Originally implemented for
37 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
38 * classes in FreeBSD.
39 */
40
41/*
42 * Main Entry: witness
43 * Pronunciation: 'wit-n&s
44 * Function: noun
45 * Etymology: Middle English witnesse, from Old English witnes knowledge,
46 * testimony, witness, from 2wit
47 * Date: before 12th century
48 * 1 : attestation of a fact or event : TESTIMONY
49 * 2 : one that gives evidence; specifically : one who testifies in
50 * a cause or before a judicial tribunal
51 * 3 : one asked to be present at a transaction so as to be able to
52 * testify to its having taken place
53 * 4 : one who has personal knowledge of something
54 * 5 a : something serving as evidence or proof : SIGN
55 * b : public affirmation by word or example of usually
56 * religious faith or conviction <the heroic witness to divine
57 * life -- Pilot>
58 * 6 capitalized : a member of the Jehovah's Witnesses
59 */
60
61/*
62 * Special rules concerning Giant and lock orders:
63 *
64 * 1) Giant must be acquired before any other mutexes. Stated another way,
65 * no other mutex may be held when Giant is acquired.
66 *
67 * 2) Giant must be released when blocking on a sleepable lock.
68 *
69 * This rule is less obvious, but is a result of Giant providing the same
70 * semantics as spl(). Basically, when a thread sleeps, it must release
71 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
72 * 2).
73 *
74 * 3) Giant may be acquired before or after sleepable locks.
75 *
76 * This rule is also not quite as obvious. Giant may be acquired after
77 * a sleepable lock because it is a non-sleepable lock and non-sleepable
78 * locks may always be acquired while holding a sleepable lock. The second
79 * case, Giant before a sleepable lock, follows from rule 2) above. Suppose
80 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
81 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
82 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
83 * execute. Thus, acquiring Giant both before and after a sleepable lock
84 * will not result in a lock order reversal.
85 */
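/*
 * Editorial sketch, not part of the original file: per rules 2) and 3)
 * above, both of the orderings below are safe ("example_sx" is a
 * hypothetical sleepable sx lock):
 *
 *	mtx_lock(&Giant);		sx_xlock(&example_sx);
 *	sx_xlock(&example_sx);		mtx_lock(&Giant);
 *	...				...
 *	sx_xunlock(&example_sx);	mtx_unlock(&Giant);
 *	mtx_unlock(&Giant);		sx_xunlock(&example_sx);
 *
 * Neither column can deadlock against the other: a thread that blocks
 * on example_sx while holding Giant drops Giant first (rule 2), so the
 * current holder of example_sx is still free to acquire Giant.
 */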
86
87#include <sys/cdefs.h>
88__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 191672 2009-04-29 19:19:13Z bms $");
89
90#include "opt_ddb.h"
91#include "opt_hwpmc_hooks.h"
92#include "opt_stack.h"
93#include "opt_witness.h"
94
95#include <sys/param.h>
96#include <sys/bus.h>
97#include <sys/kdb.h>
98#include <sys/kernel.h>
99#include <sys/ktr.h>
100#include <sys/lock.h>
101#include <sys/malloc.h>
102#include <sys/mutex.h>
103#include <sys/priv.h>
104#include <sys/proc.h>
105#include <sys/sbuf.h>
106#include <sys/sched.h>
107#include <sys/stack.h>
108#include <sys/sysctl.h>
109#include <sys/systm.h>
110
111#ifdef DDB
112#include <ddb/ddb.h>
113#endif
114
115#include <machine/stdarg.h>
116
117#if !defined(DDB) && !defined(STACK)
118#error "DDB or STACK options are required for WITNESS"
119#endif
120
121/* Note that these traces do not work with KTR_ALQ. */
122#if 0
123#define KTR_WITNESS KTR_SUBSYS
124#else
125#define KTR_WITNESS 0
126#endif
127
128#define LI_RECURSEMASK 0x0000ffff /* Recursion depth of lock instance. */
129#define LI_EXCLUSIVE 0x00010000 /* Exclusive lock instance. */
130#define LI_NORELEASE 0x00020000 /* Lock not allowed to be released. */
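/*
 * Editorial example, not in the original: li_flags packs a 16-bit
 * recursion counter underneath the flag bits, so a recursed exclusive
 * instance can be recognized as:
 *
 *	(li->li_flags & LI_RECURSEMASK) > 0 &&
 *	(li->li_flags & LI_EXCLUSIVE) != 0
 */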
131
132/* Define this to check for blessed mutexes */
133#undef BLESSING
134
135#define WITNESS_COUNT 1024
136#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
137#define WITNESS_HASH_SIZE 251 /* Prime, gives load factor < 2 */
138#define WITNESS_PENDLIST 512
139
140/* Allocate 256 KB of stack data space */
141#define WITNESS_LO_DATA_COUNT 2048
142
143/* Prime, gives load factor of ~2 at full load */
144#define WITNESS_LO_HASH_SIZE 1021
145
146/*
147 * XXX: This is somewhat bogus, as we assume here that at most 2048 threads
148 * will hold LOCK_NCHILDREN locks. We handle failure ok, and we should
149 * probably be safe for the most part, but it's still a SWAG.
150 */
151#define LOCK_NCHILDREN 5
152#define LOCK_CHILDCOUNT 2048
153
154#define MAX_W_NAME 64
155
156#define BADSTACK_SBUF_SIZE (256 * WITNESS_COUNT)
157#define CYCLEGRAPH_SBUF_SIZE 8192
158#define FULLGRAPH_SBUF_SIZE 32768
159
160/*
161 * These flags go in the witness relationship matrix and describe the
162 * relationship between any two struct witness objects.
163 */
164#define WITNESS_UNRELATED 0x00 /* No lock order relation. */
165#define WITNESS_PARENT 0x01 /* Parent, aka direct ancestor. */
166#define WITNESS_ANCESTOR 0x02 /* Direct or indirect ancestor. */
167#define WITNESS_CHILD 0x04 /* Child, aka direct descendant. */
168#define WITNESS_DESCENDANT 0x08 /* Direct or indirect descendant. */
169#define WITNESS_ANCESTOR_MASK (WITNESS_PARENT | WITNESS_ANCESTOR)
170#define WITNESS_DESCENDANT_MASK (WITNESS_CHILD | WITNESS_DESCENDANT)
171#define WITNESS_RELATED_MASK \
172 (WITNESS_ANCESTOR_MASK | WITNESS_DESCENDANT_MASK)
173#define WITNESS_REVERSAL 0x10 /* A lock order reversal has been
174 * observed. */
175#define WITNESS_RESERVED1 0x20 /* Unused flag, reserved. */
176#define WITNESS_RESERVED2 0x40 /* Unused flag, reserved. */
177#define WITNESS_LOCK_ORDER_KNOWN 0x80 /* This lock order is known. */
178
179/* Descendant to ancestor flags */
180#define WITNESS_DTOA(x) (((x) & WITNESS_RELATED_MASK) >> 2)
181
182/* Ancestor to descendant flags */
183#define WITNESS_ATOD(x) (((x) & WITNESS_RELATED_MASK) << 2)
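/*
 * Editorial example, not in the original: the ancestor and descendant
 * flag encodings above differ by exactly a two-bit shift, so the
 * macros convert one view into the other:
 *
 *	WITNESS_ATOD(WITNESS_PARENT)   == WITNESS_CHILD       (0x01 -> 0x04)
 *	WITNESS_ATOD(WITNESS_ANCESTOR) == WITNESS_DESCENDANT  (0x02 -> 0x08)
 *	WITNESS_DTOA(WITNESS_CHILD)    == WITNESS_PARENT      (0x04 -> 0x01)
 */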
184
185#define WITNESS_INDEX_ASSERT(i) \
186 MPASS((i) > 0 && (i) <= w_max_used_index && (i) < WITNESS_COUNT)
187
188MALLOC_DEFINE(M_WITNESS, "Witness", "Witness");
189
190/*
191 * Lock instances. A lock instance is the data associated with a lock while
192 * it is held by witness. For example, a lock instance will hold the
193 * recursion count of a lock. Lock instances are held in lists. Spin locks
 194 * are held in a per-cpu list while sleep locks are held in a per-thread list.
195 */
196struct lock_instance {
197 struct lock_object *li_lock;
198 const char *li_file;
199 int li_line;
200 u_int li_flags;
201};
202
203/*
204 * A simple list type used to build the list of locks held by a thread
205 * or CPU. We can't simply embed the list in struct lock_object since a
206 * lock may be held by more than one thread if it is a shared lock. Locks
207 * are added to the head of the list, so we fill up each list entry from
208 * "the back" logically. To ease some of the arithmetic, we actually fill
209 * in each list entry the normal way (children[0] then children[1], etc.) but
210 * when we traverse the list we read children[count-1] as the first entry
211 * down to children[0] as the final entry.
212 */
213struct lock_list_entry {
214 struct lock_list_entry *ll_next;
215 struct lock_instance ll_children[LOCK_NCHILDREN];
216 u_int ll_count;
217};
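/*
 * Editorial sketch, not in the original (visit() is a hypothetical
 * callback): given the fill-from-the-back convention described above,
 * walking the held locks from most to least recently acquired is:
 *
 *	for (lle = lock_list; lle != NULL; lle = lle->ll_next)
 *		for (i = lle->ll_count - 1; i >= 0; i--)
 *			visit(&lle->ll_children[i]);
 *
 * The checking loop in witness_checkorder() below iterates in exactly
 * this order.
 */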
218
219/*
220 * The main witness structure. One of these per named lock type in the system
221 * (for example, "vnode interlock").
222 */
223struct witness {
224 char w_name[MAX_W_NAME];
225 uint32_t w_index; /* Index in the relationship matrix */
226 struct lock_class *w_class;
227 STAILQ_ENTRY(witness) w_list; /* List of all witnesses. */
228 STAILQ_ENTRY(witness) w_typelist; /* Witnesses of a type. */
229 struct witness *w_hash_next; /* Linked list in hash buckets. */
230 const char *w_file; /* File where last acquired */
231 uint32_t w_line; /* Line where last acquired */
232 uint32_t w_refcount;
233 uint16_t w_num_ancestors; /* direct/indirect
234 * ancestor count */
235 uint16_t w_num_descendants; /* direct/indirect
236 * descendant count */
237 int16_t w_ddb_level;
238 unsigned w_displayed:1;
239 unsigned w_reversed:1;
240};
241
242STAILQ_HEAD(witness_list, witness);
243
244/*
245 * The witness hash table. Keys are witness names (const char *), elements are
246 * witness objects (struct witness *).
247 */
248struct witness_hash {
249 struct witness *wh_array[WITNESS_HASH_SIZE];
250 uint32_t wh_size;
251 uint32_t wh_count;
252};
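/*
 * Editorial sketch, not in this excerpt: witness_hash_djb2(), declared
 * below, is the classic djb2 string hash; a minimal version over a
 * NUL-terminated key would be:
 *
 *	uint32_t hash = 5381;
 *	while (*key != '\0')
 *		hash = ((hash << 5) + hash) + (uint8_t)*key++;
 *
 * with the bucket chosen as hash % WITNESS_HASH_SIZE (the in-tree
 * version also takes a size argument).
 */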
253
254/*
255 * Key type for the lock order data hash table.
256 */
257struct witness_lock_order_key {
258 uint16_t from;
259 uint16_t to;
260};
261
262struct witness_lock_order_data {
263 struct stack wlod_stack;
264 struct witness_lock_order_key wlod_key;
265 struct witness_lock_order_data *wlod_next;
266};
267
268/*
269 * The witness lock order data hash table. Keys are witness index tuples
270 * (struct witness_lock_order_key), elements are lock order data objects
271 * (struct witness_lock_order_data).
272 */
273struct witness_lock_order_hash {
274 struct witness_lock_order_data *wloh_array[WITNESS_LO_HASH_SIZE];
275 u_int wloh_size;
276 u_int wloh_count;
277};
278
279#ifdef BLESSING
280struct witness_blessed {
281 const char *b_lock1;
282 const char *b_lock2;
283};
284#endif
285
286struct witness_pendhelp {
287 const char *wh_type;
288 struct lock_object *wh_lock;
289};
290
291struct witness_order_list_entry {
292 const char *w_name;
293 struct lock_class *w_class;
294};
295
296/*
297 * Returns 0 if one of the locks is a spin lock and the other is not.
298 * Returns 1 otherwise.
299 */
300static __inline int
301witness_lock_type_equal(struct witness *w1, struct witness *w2)
302{
303
304 return ((w1->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) ==
305 (w2->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)));
306}
307
308static __inline int
309witness_lock_order_key_empty(const struct witness_lock_order_key *key)
310{
311
312 return (key->from == 0 && key->to == 0);
313}
314
315static __inline int
316witness_lock_order_key_equal(const struct witness_lock_order_key *a,
317 const struct witness_lock_order_key *b)
318{
319
320 return (a->from == b->from && a->to == b->to);
321}
322
323static int _isitmyx(struct witness *w1, struct witness *w2, int rmask,
324 const char *fname);
325#ifdef KDB
326static void _witness_debugger(int cond, const char *msg);
327#endif
328static void adopt(struct witness *parent, struct witness *child);
329#ifdef BLESSING
330static int blessed(struct witness *, struct witness *);
331#endif
332static void depart(struct witness *w);
333static struct witness *enroll(const char *description,
334 struct lock_class *lock_class);
335static struct lock_instance *find_instance(struct lock_list_entry *list,
336 struct lock_object *lock);
337static int isitmychild(struct witness *parent, struct witness *child);
338static int isitmydescendant(struct witness *parent, struct witness *child);
339static void itismychild(struct witness *parent, struct witness *child);
340static int sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS);
341static int sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
342static int sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS);
343static void witness_add_fullgraph(struct sbuf *sb, struct witness *parent);
344#ifdef DDB
345static void witness_ddb_compute_levels(void);
346static void witness_ddb_display(void(*)(const char *fmt, ...));
347static void witness_ddb_display_descendants(void(*)(const char *fmt, ...),
348 struct witness *, int indent);
349static void witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
350 struct witness_list *list);
351static void witness_ddb_level_descendants(struct witness *parent, int l);
352static void witness_ddb_list(struct thread *td);
353#endif
354static void witness_free(struct witness *m);
355static struct witness *witness_get(void);
356static uint32_t witness_hash_djb2(const uint8_t *key, uint32_t size);
357static struct witness *witness_hash_get(const char *key);
358static void witness_hash_put(struct witness *w);
359static void witness_init_hash_tables(void);
360static void witness_increment_graph_generation(void);
361static void witness_lock_list_free(struct lock_list_entry *lle);
362static struct lock_list_entry *witness_lock_list_get(void);
363static int witness_lock_order_add(struct witness *parent,
364 struct witness *child);
365static int witness_lock_order_check(struct witness *parent,
366 struct witness *child);
367static struct witness_lock_order_data *witness_lock_order_get(
368 struct witness *parent,
369 struct witness *child);
370static void witness_list_lock(struct lock_instance *instance);
371static void witness_setflag(struct lock_object *lock, int flag, int set);
372
373#ifdef KDB
374#define witness_debugger(c) _witness_debugger(c, __func__)
375#else
376#define witness_debugger(c)
377#endif
378
379SYSCTL_NODE(_debug, OID_AUTO, witness, CTLFLAG_RW, NULL, "Witness Locking");
380
381/*
382 * If set to 0, lock order checking is disabled. If set to -1,
383 * witness is completely disabled. Otherwise witness performs full
384 * lock order checking for all locks. At runtime, lock order checking
385 * may be toggled. However, witness cannot be reenabled once it is
386 * completely disabled.
387 */
388static int witness_watch = 1;
389TUNABLE_INT("debug.witness.watch", &witness_watch);
390SYSCTL_PROC(_debug_witness, OID_AUTO, watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
391 sysctl_debug_witness_watch, "I", "witness is watching lock operations");
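/*
 * Usage note (editorial): since the knob is both a loader tunable and
 * a read-write sysctl, checking can be relaxed with
 * "debug.witness.watch=0" in /boot/loader.conf or at runtime via
 * "sysctl debug.witness.watch=0"; per the comment above, lowering it
 * to -1 is a one-way switch.
 */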
392
393#ifdef KDB
394/*
395 * When KDB is enabled and witness_kdb is 1, it will cause the system
396 * to drop into kdebug() when:
397 * - a lock hierarchy violation occurs
398 * - locks are held when going to sleep.
399 */
400#ifdef WITNESS_KDB
401int witness_kdb = 1;
402#else
403int witness_kdb = 0;
404#endif
405TUNABLE_INT("debug.witness.kdb", &witness_kdb);
406SYSCTL_INT(_debug_witness, OID_AUTO, kdb, CTLFLAG_RW, &witness_kdb, 0, "");
407
408/*
409 * When KDB is enabled and witness_trace is 1, it will cause the system
 410 * to print a stack trace when:
411 * - a lock hierarchy violation occurs
412 * - locks are held when going to sleep.
413 */
414int witness_trace = 1;
415TUNABLE_INT("debug.witness.trace", &witness_trace);
416SYSCTL_INT(_debug_witness, OID_AUTO, trace, CTLFLAG_RW, &witness_trace, 0, "");
417#endif /* KDB */
418
419#ifdef WITNESS_SKIPSPIN
420int witness_skipspin = 1;
421#else
422int witness_skipspin = 0;
423#endif
424TUNABLE_INT("debug.witness.skipspin", &witness_skipspin);
425SYSCTL_INT(_debug_witness, OID_AUTO, skipspin, CTLFLAG_RDTUN, &witness_skipspin,
426 0, "");
427
428/*
429 * Call this to print out the relations between locks.
430 */
431SYSCTL_PROC(_debug_witness, OID_AUTO, fullgraph, CTLTYPE_STRING | CTLFLAG_RD,
432 NULL, 0, sysctl_debug_witness_fullgraph, "A", "Show locks relation graphs");
433
434/*
 435 * Call this to print out the faulty witness stacks.
436 */
437SYSCTL_PROC(_debug_witness, OID_AUTO, badstacks, CTLTYPE_STRING | CTLFLAG_RD,
438 NULL, 0, sysctl_debug_witness_badstacks, "A", "Show bad witness stacks");
439
440static struct mtx w_mtx;
441
442/* w_list */
443static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
444static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
445
446/* w_typelist */
447static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
448static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
449
450/* lock list */
451static struct lock_list_entry *w_lock_list_free = NULL;
452static struct witness_pendhelp pending_locks[WITNESS_PENDLIST];
453static u_int pending_cnt;
454
455static int w_free_cnt, w_spin_cnt, w_sleep_cnt;
456SYSCTL_INT(_debug_witness, OID_AUTO, free_cnt, CTLFLAG_RD, &w_free_cnt, 0, "");
457SYSCTL_INT(_debug_witness, OID_AUTO, spin_cnt, CTLFLAG_RD, &w_spin_cnt, 0, "");
458SYSCTL_INT(_debug_witness, OID_AUTO, sleep_cnt, CTLFLAG_RD, &w_sleep_cnt, 0,
459 "");
460
461static struct witness *w_data;
462static uint8_t w_rmatrix[WITNESS_COUNT+1][WITNESS_COUNT+1];
463static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
464static struct witness_hash w_hash; /* The witness hash table. */
465
466/* The lock order data hash */
467static struct witness_lock_order_data w_lodata[WITNESS_LO_DATA_COUNT];
468static struct witness_lock_order_data *w_lofree = NULL;
469static struct witness_lock_order_hash w_lohash;
470static int w_max_used_index = 0;
471static unsigned int w_generation = 0;
472static const char *w_notrunning = "Witness not running\n";
473static const char *w_stillcold = "Witness is still cold\n";
474
475
476static struct witness_order_list_entry order_lists[] = {
477 /*
478 * sx locks
479 */
480 { "proctree", &lock_class_sx },
481 { "allproc", &lock_class_sx },
482 { "allprison", &lock_class_sx },
483 { NULL, NULL },
484 /*
485 * Various mutexes
486 */
487 { "Giant", &lock_class_mtx_sleep },
488 { "pipe mutex", &lock_class_mtx_sleep },
489 { "sigio lock", &lock_class_mtx_sleep },
490 { "process group", &lock_class_mtx_sleep },
491 { "process lock", &lock_class_mtx_sleep },
492 { "session", &lock_class_mtx_sleep },
493 { "uidinfo hash", &lock_class_rw },
494#ifdef HWPMC_HOOKS
495 { "pmc-sleep", &lock_class_mtx_sleep },
496#endif
497 { NULL, NULL },
498 /*
499 * Sockets
500 */
501 { "accept", &lock_class_mtx_sleep },
502 { "so_snd", &lock_class_mtx_sleep },
503 { "so_rcv", &lock_class_mtx_sleep },
504 { "sellck", &lock_class_mtx_sleep },
505 { NULL, NULL },
506 /*
507 * Routing
508 */
509 { "so_rcv", &lock_class_mtx_sleep },
510 { "radix node head", &lock_class_rw },
511 { "rtentry", &lock_class_mtx_sleep },
512 { "ifaddr", &lock_class_mtx_sleep },
513 { NULL, NULL },
514 /*
 515 * IPv4 multicast:
 516 * protocol locks before interface locks, after UDP locks.
 517 */
 518 { "udpinp", &lock_class_rw },
 519 { "in_multi_mtx", &lock_class_mtx_sleep },
 520 { "igmp_mtx", &lock_class_mtx_sleep },
 521 { "if_addr_mtx", &lock_class_mtx_sleep },
 522 { NULL, NULL },
523 /*
524 * IPv6 multicast:
525 * protocol locks before interface locks, after UDP locks.
526 */
527 { "udpinp", &lock_class_rw },
528 { "in6_multi_mtx", &lock_class_mtx_sleep },
529 { "mld_mtx", &lock_class_mtx_sleep },
530 { "if_addr_mtx", &lock_class_mtx_sleep },
531 { NULL, NULL },
532 /*
523 * UNIX Domain Sockets
524 */
525 { "unp_global_rwlock", &lock_class_rw },
526 { "unp_list_lock", &lock_class_mtx_sleep },
527 { "unp", &lock_class_mtx_sleep },
528 { "so_snd", &lock_class_mtx_sleep },
529 { NULL, NULL },
530 /*
531 * UDP/IP
532 */
533 { "udp", &lock_class_rw },
534 { "udpinp", &lock_class_rw },
535 { "so_snd", &lock_class_mtx_sleep },
536 { NULL, NULL },
537 /*
538 * TCP/IP
539 */
540 { "tcp", &lock_class_rw },
541 { "tcpinp", &lock_class_rw },
542 { "so_snd", &lock_class_mtx_sleep },
543 { NULL, NULL },
544 /*
545 * SLIP
546 */
547 { "slip_mtx", &lock_class_mtx_sleep },
548 { "slip sc_mtx", &lock_class_mtx_sleep },
549 { NULL, NULL },
550 /*
551 * netatalk
552 */
553 { "ddp_list_mtx", &lock_class_mtx_sleep },
554 { "ddp_mtx", &lock_class_mtx_sleep },
555 { NULL, NULL },
556 /*
557 * BPF
558 */
559 { "bpf global lock", &lock_class_mtx_sleep },
560 { "bpf interface lock", &lock_class_mtx_sleep },
561 { "bpf cdev lock", &lock_class_mtx_sleep },
562 { NULL, NULL },
563 /*
564 * NFS server
565 */
566 { "nfsd_mtx", &lock_class_mtx_sleep },
567 { "so_snd", &lock_class_mtx_sleep },
568 { NULL, NULL },
569
570 /*
571 * IEEE 802.11
572 */
573 { "802.11 com lock", &lock_class_mtx_sleep},
574 { NULL, NULL },
575 /*
576 * Network drivers
577 */
578 { "network driver", &lock_class_mtx_sleep},
579 { NULL, NULL },
580
581 /*
582 * Netgraph
583 */
584 { "ng_node", &lock_class_mtx_sleep },
585 { "ng_worklist", &lock_class_mtx_sleep },
586 { NULL, NULL },
587 /*
588 * CDEV
589 */
590 { "system map", &lock_class_mtx_sleep },
591 { "vm page queue mutex", &lock_class_mtx_sleep },
592 { "vnode interlock", &lock_class_mtx_sleep },
593 { "cdev", &lock_class_mtx_sleep },
594 { NULL, NULL },
595 /*
596 * kqueue/VFS interaction
597 */
598 { "kqueue", &lock_class_mtx_sleep },
599 { "struct mount mtx", &lock_class_mtx_sleep },
600 { "vnode interlock", &lock_class_mtx_sleep },
601 { NULL, NULL },
602 /*
603 * spin locks
604 */
605#ifdef SMP
606 { "ap boot", &lock_class_mtx_spin },
607#endif
608 { "rm.mutex_mtx", &lock_class_mtx_spin },
609 { "sio", &lock_class_mtx_spin },
610 { "scrlock", &lock_class_mtx_spin },
611#ifdef __i386__
612 { "cy", &lock_class_mtx_spin },
613#endif
614#ifdef __sparc64__
615 { "pcib_mtx", &lock_class_mtx_spin },
616 { "rtc_mtx", &lock_class_mtx_spin },
617#endif
618 { "scc_hwmtx", &lock_class_mtx_spin },
619 { "uart_hwmtx", &lock_class_mtx_spin },
620 { "fast_taskqueue", &lock_class_mtx_spin },
621 { "intr table", &lock_class_mtx_spin },
622#ifdef HWPMC_HOOKS
623 { "pmc-per-proc", &lock_class_mtx_spin },
624#endif
625 { "process slock", &lock_class_mtx_spin },
626 { "sleepq chain", &lock_class_mtx_spin },
627 { "umtx lock", &lock_class_mtx_spin },
628 { "rm_spinlock", &lock_class_mtx_spin },
629 { "turnstile chain", &lock_class_mtx_spin },
630 { "turnstile lock", &lock_class_mtx_spin },
631 { "sched lock", &lock_class_mtx_spin },
632 { "td_contested", &lock_class_mtx_spin },
633 { "callout", &lock_class_mtx_spin },
634 { "entropy harvest mutex", &lock_class_mtx_spin },
635 { "syscons video lock", &lock_class_mtx_spin },
636 { "time lock", &lock_class_mtx_spin },
637#ifdef SMP
638 { "smp rendezvous", &lock_class_mtx_spin },
639#endif
640#ifdef __powerpc__
641 { "tlb0", &lock_class_mtx_spin },
642#endif
643 /*
644 * leaf locks
645 */
646 { "intrcnt", &lock_class_mtx_spin },
647 { "icu", &lock_class_mtx_spin },
648#if defined(SMP) && defined(__sparc64__)
649 { "ipi", &lock_class_mtx_spin },
650#endif
651#ifdef __i386__
652 { "allpmaps", &lock_class_mtx_spin },
653 { "descriptor tables", &lock_class_mtx_spin },
654#endif
655 { "clk", &lock_class_mtx_spin },
656 { "cpuset", &lock_class_mtx_spin },
657 { "mprof lock", &lock_class_mtx_spin },
658 { "zombie lock", &lock_class_mtx_spin },
659 { "ALD Queue", &lock_class_mtx_spin },
660#ifdef __ia64__
661 { "MCA spin lock", &lock_class_mtx_spin },
662#endif
663#if defined(__i386__) || defined(__amd64__)
664 { "pcicfg", &lock_class_mtx_spin },
665 { "NDIS thread lock", &lock_class_mtx_spin },
666#endif
667 { "tw_osl_io_lock", &lock_class_mtx_spin },
668 { "tw_osl_q_lock", &lock_class_mtx_spin },
669 { "tw_cl_io_lock", &lock_class_mtx_spin },
670 { "tw_cl_intr_lock", &lock_class_mtx_spin },
671 { "tw_cl_gen_lock", &lock_class_mtx_spin },
672#ifdef HWPMC_HOOKS
673 { "pmc-leaf", &lock_class_mtx_spin },
674#endif
675 { "blocked lock", &lock_class_mtx_spin },
676 { NULL, NULL },
677 { NULL, NULL }
678};
679
680#ifdef BLESSING
681/*
682 * Pairs of locks which have been blessed
683 * Don't complain about order problems with blessed locks
684 */
685static struct witness_blessed blessed_list[] = {
686};
687static int blessed_count =
688 sizeof(blessed_list) / sizeof(struct witness_blessed);
689#endif
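/*
 * Editorial example (hypothetical lock names): a known-benign pair
 * would be listed here as
 *
 *	{ "lockA", "lockB" },
 *
 * which makes blessed() suppress order complaints between the two.
 */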
690
691/*
692 * This global is set to 0 once it becomes safe to use the witness code.
693 */
694static int witness_cold = 1;
695
696/*
697 * This global is set to 1 once the static lock orders have been enrolled
698 * so that a warning can be issued for any spin locks enrolled later.
699 */
700static int witness_spin_warn = 0;
701
702/*
703 * The WITNESS-enabled diagnostic code. Note that the witness code does
704 * assume that the early boot is single-threaded at least until after this
705 * routine is completed.
706 */
707static void
708witness_initialize(void *dummy __unused)
709{
710 struct lock_object *lock;
711 struct witness_order_list_entry *order;
712 struct witness *w, *w1;
713 int i;
714
715 w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
716 M_NOWAIT | M_ZERO);
717
718 /*
719 * We have to release Giant before initializing its witness
720 * structure so that WITNESS doesn't get confused.
721 */
722 mtx_unlock(&Giant);
723 mtx_assert(&Giant, MA_NOTOWNED);
724
725 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
726 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
727 MTX_NOWITNESS | MTX_NOPROFILE);
728 for (i = WITNESS_COUNT - 1; i >= 0; i--) {
729 w = &w_data[i];
730 memset(w, 0, sizeof(*w));
731 w_data[i].w_index = i; /* Witness index never changes. */
732 witness_free(w);
733 }
734 KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
735 ("%s: Invalid list of free witness objects", __func__));
736
 737 /* The witness with index 0 is left unused, to aid in debugging. */
738 STAILQ_REMOVE_HEAD(&w_free, w_list);
739 w_free_cnt--;
740
741 memset(w_rmatrix, 0,
742 (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));
743
744 for (i = 0; i < LOCK_CHILDCOUNT; i++)
745 witness_lock_list_free(&w_locklistdata[i]);
746 witness_init_hash_tables();
747
748 /* First add in all the specified order lists. */
749 for (order = order_lists; order->w_name != NULL; order++) {
750 w = enroll(order->w_name, order->w_class);
751 if (w == NULL)
752 continue;
753 w->w_file = "order list";
754 for (order++; order->w_name != NULL; order++) {
755 w1 = enroll(order->w_name, order->w_class);
756 if (w1 == NULL)
757 continue;
758 w1->w_file = "order list";
759 itismychild(w, w1);
760 w = w1;
761 }
762 }
763 witness_spin_warn = 1;
764
765 /* Iterate through all locks and add them to witness. */
766 for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
767 lock = pending_locks[i].wh_lock;
768 KASSERT(lock->lo_flags & LO_WITNESS,
769 ("%s: lock %s is on pending list but not LO_WITNESS",
770 __func__, lock->lo_name));
771 lock->lo_witness = enroll(pending_locks[i].wh_type,
772 LOCK_CLASS(lock));
773 }
774
775 /* Mark the witness code as being ready for use. */
776 witness_cold = 0;
777
778 mtx_lock(&Giant);
779}
780SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
781 NULL);
782
783void
784witness_init(struct lock_object *lock, const char *type)
785{
786 struct lock_class *class;
787
788 /* Various sanity checks. */
789 class = LOCK_CLASS(lock);
790 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
791 (class->lc_flags & LC_RECURSABLE) == 0)
792 panic("%s: lock (%s) %s can not be recursable", __func__,
793 class->lc_name, lock->lo_name);
794 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
795 (class->lc_flags & LC_SLEEPABLE) == 0)
796 panic("%s: lock (%s) %s can not be sleepable", __func__,
797 class->lc_name, lock->lo_name);
798 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
799 (class->lc_flags & LC_UPGRADABLE) == 0)
800 panic("%s: lock (%s) %s can not be upgradable", __func__,
801 class->lc_name, lock->lo_name);
802
803 /*
804 * If we shouldn't watch this lock, then just clear lo_witness.
805 * Otherwise, if witness_cold is set, then it is too early to
806 * enroll this lock, so defer it to witness_initialize() by adding
807 * it to the pending_locks list. If it is not too early, then enroll
808 * the lock now.
809 */
810 if (witness_watch < 1 || panicstr != NULL ||
811 (lock->lo_flags & LO_WITNESS) == 0)
812 lock->lo_witness = NULL;
813 else if (witness_cold) {
814 pending_locks[pending_cnt].wh_lock = lock;
815 pending_locks[pending_cnt++].wh_type = type;
816 if (pending_cnt > WITNESS_PENDLIST)
817 panic("%s: pending locks list is too small, bump it\n",
818 __func__);
819 } else
820 lock->lo_witness = enroll(type, class);
821}
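/*
 * Editorial sketch, not in the original ("sc" and "examplelock" are
 * hypothetical): a witnessed lock created before SI_SUB_WITNESS runs,
 * e.g.
 *
 *	mtx_init(&sc->sc_mtx, "examplelock", NULL, MTX_DEF);
 *
 * takes the witness_cold branch above: it is queued on pending_locks
 * and only enrolled once witness_initialize() drains that list.
 */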
822
823void
824witness_destroy(struct lock_object *lock)
825{
826 struct lock_class *class;
827 struct witness *w;
828
829 class = LOCK_CLASS(lock);
830
831 if (witness_cold)
832 panic("lock (%s) %s destroyed while witness_cold",
833 class->lc_name, lock->lo_name);
834
835 /* XXX: need to verify that no one holds the lock */
836 if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
837 return;
838 w = lock->lo_witness;
839
840 mtx_lock_spin(&w_mtx);
841 MPASS(w->w_refcount > 0);
842 w->w_refcount--;
843
844 if (w->w_refcount == 0)
845 depart(w);
846 mtx_unlock_spin(&w_mtx);
847}
848
849#ifdef DDB
850static void
851witness_ddb_compute_levels(void)
852{
853 struct witness *w;
854
855 /*
856 * First clear all levels.
857 */
858 STAILQ_FOREACH(w, &w_all, w_list)
859 w->w_ddb_level = -1;
860
861 /*
862 * Look for locks with no parents and level all their descendants.
863 */
864 STAILQ_FOREACH(w, &w_all, w_list) {
865
866 /* If the witness has ancestors (is not a root), skip it. */
867 if (w->w_num_ancestors > 0)
868 continue;
869 witness_ddb_level_descendants(w, 0);
870 }
871}
872
873static void
874witness_ddb_level_descendants(struct witness *w, int l)
875{
876 int i;
877
878 if (w->w_ddb_level >= l)
879 return;
880
881 w->w_ddb_level = l;
882 l++;
883
884 for (i = 1; i <= w_max_used_index; i++) {
885 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
886 witness_ddb_level_descendants(&w_data[i], l);
887 }
888}
889
890static void
891witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
892 struct witness *w, int indent)
893{
894 int i;
895
896 for (i = 0; i < indent; i++)
897 prnt(" ");
898 prnt("%s (type: %s, depth: %d, active refs: %d)",
899 w->w_name, w->w_class->lc_name,
900 w->w_ddb_level, w->w_refcount);
901 if (w->w_displayed) {
902 prnt(" -- (already displayed)\n");
903 return;
904 }
905 w->w_displayed = 1;
906 if (w->w_file != NULL && w->w_line != 0)
907 prnt(" -- last acquired @ %s:%d\n", w->w_file,
908 w->w_line);
909 else
910 prnt(" -- never acquired\n");
911 indent++;
912 WITNESS_INDEX_ASSERT(w->w_index);
913 for (i = 1; i <= w_max_used_index; i++) {
914 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
915 witness_ddb_display_descendants(prnt, &w_data[i],
916 indent);
917 }
918}
919
920static void
921witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
922 struct witness_list *list)
923{
924 struct witness *w;
925
926 STAILQ_FOREACH(w, list, w_typelist) {
927 if (w->w_file == NULL || w->w_ddb_level > 0)
928 continue;
929
 930 /* This lock has no ancestors - display its descendants. */
931 witness_ddb_display_descendants(prnt, w, 0);
932 }
933}
934
935static void
936witness_ddb_display(void(*prnt)(const char *fmt, ...))
937{
938 struct witness *w;
939
940 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
941 witness_ddb_compute_levels();
942
943 /* Clear all the displayed flags. */
944 STAILQ_FOREACH(w, &w_all, w_list)
945 w->w_displayed = 0;
946
947 /*
948 * First, handle sleep locks which have been acquired at least
949 * once.
950 */
951 prnt("Sleep locks:\n");
952 witness_ddb_display_list(prnt, &w_sleep);
953
954 /*
955 * Now do spin locks which have been acquired at least once.
956 */
957 prnt("\nSpin locks:\n");
958 witness_ddb_display_list(prnt, &w_spin);
959
960 /*
961 * Finally, any locks which have not been acquired yet.
962 */
963 prnt("\nLocks which were never acquired:\n");
964 STAILQ_FOREACH(w, &w_all, w_list) {
965 if (w->w_file != NULL || w->w_refcount == 0)
966 continue;
967 prnt("%s (type: %s, depth: %d)\n", w->w_name,
968 w->w_class->lc_name, w->w_ddb_level);
969 }
970}
971#endif /* DDB */
972
973/* Trim useless garbage from filenames. */
974static const char *
975fixup_filename(const char *file)
976{
977
978 if (file == NULL)
979 return (NULL);
980 while (strncmp(file, "../", 3) == 0)
981 file += 3;
982 return (file);
983}
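/*
 * Editorial example: fixup_filename("../../kern/kern_mutex.c") strips
 * the two leading "../" components and returns "kern/kern_mutex.c".
 */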
984
985int
986witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
987{
988
989 if (witness_watch == -1 || panicstr != NULL)
990 return (0);
991
992 /* Require locks that witness knows about. */
993 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
994 lock2->lo_witness == NULL)
995 return (EINVAL);
996
997 mtx_assert(&w_mtx, MA_NOTOWNED);
998 mtx_lock_spin(&w_mtx);
999
1000 /*
1001 * If we already have either an explicit or implied lock order that
1002 * is the other way around, then return an error.
1003 */
1004 if (witness_watch &&
1005 isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1006 mtx_unlock_spin(&w_mtx);
1007 return (EDOOFUS);
1008 }
1009
1010 /* Try to add the new order. */
1011 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1012 lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1013 itismychild(lock1->lo_witness, lock2->lo_witness);
1014 mtx_unlock_spin(&w_mtx);
1015 return (0);
1016}
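/*
 * Editorial sketch, not in the original ("a" and "b" are hypothetical
 * mutexes): a subsystem can pin an order explicitly instead of waiting
 * for witness to infer it from actual acquisitions:
 *
 *	error = witness_defineorder(&a.lock_object, &b.lock_object);
 *
 * This returns EDOOFUS if the reverse order is already established and
 * EINVAL if either lock is unknown to witness.
 */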
1017
1018void
1019witness_checkorder(struct lock_object *lock, int flags, const char *file,
1020 int line, struct lock_object *interlock)
1021{
1022 struct lock_list_entry *lock_list, *lle;
1023 struct lock_instance *lock1, *lock2, *plock;
1024 struct lock_class *class;
1025 struct witness *w, *w1;
1026 struct thread *td;
1027 int i, j;
1028
1029 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1030 panicstr != NULL)
1031 return;
1032
1033 w = lock->lo_witness;
1034 class = LOCK_CLASS(lock);
1035 td = curthread;
1036 file = fixup_filename(file);
1037
1038 if (class->lc_flags & LC_SLEEPLOCK) {
1039
1040 /*
1041 * Since spin locks include a critical section, this check
1042 * implicitly enforces a lock order of all sleep locks before
1043 * all spin locks.
1044 */
1045 if (td->td_critnest != 0 && !kdb_active)
1046 panic("blockable sleep lock (%s) %s @ %s:%d",
1047 class->lc_name, lock->lo_name, file, line);
1048
1049 /*
1050 * If this is the first lock acquired then just return as
1051 * no order checking is needed.
1052 */
1053 lock_list = td->td_sleeplocks;
1054 if (lock_list == NULL || lock_list->ll_count == 0)
1055 return;
1056 } else {
1057
1058 /*
1059 * If this is the first lock, just return as no order
1060 * checking is needed. Avoid problems with thread
1061 * migration pinning the thread while checking if
1062 * spinlocks are held. If at least one spinlock is held
1063 * the thread is in a safe path and it is allowed to
1064 * unpin it.
1065 */
1066 sched_pin();
1067 lock_list = PCPU_GET(spinlocks);
1068 if (lock_list == NULL || lock_list->ll_count == 0) {
1069 sched_unpin();
1070 return;
1071 }
1072 sched_unpin();
1073 }
1074
1075 /*
1076 * Check to see if we are recursing on a lock we already own. If
1077 * so, make sure that we don't mismatch exclusive and shared lock
1078 * acquires.
1079 */
1080 lock1 = find_instance(lock_list, lock);
1081 if (lock1 != NULL) {
1082 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1083 (flags & LOP_EXCLUSIVE) == 0) {
1084 printf("shared lock of (%s) %s @ %s:%d\n",
1085 class->lc_name, lock->lo_name, file, line);
1086 printf("while exclusively locked from %s:%d\n",
1087 lock1->li_file, lock1->li_line);
1088 panic("share->excl");
1089 }
1090 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1091 (flags & LOP_EXCLUSIVE) != 0) {
1092 printf("exclusive lock of (%s) %s @ %s:%d\n",
1093 class->lc_name, lock->lo_name, file, line);
1094 printf("while share locked from %s:%d\n",
1095 lock1->li_file, lock1->li_line);
1096 panic("excl->share");
1097 }
1098 return;
1099 }
1100
1101 /*
1102 * Find the previously acquired lock, but ignore interlocks.
1103 */
1104 plock = &lock_list->ll_children[lock_list->ll_count - 1];
1105 if (interlock != NULL && plock->li_lock == interlock) {
1106 if (lock_list->ll_count > 1)
1107 plock =
1108 &lock_list->ll_children[lock_list->ll_count - 2];
1109 else {
1110 lle = lock_list->ll_next;
1111
1112 /*
1113 * The interlock is the only lock we hold, so
1114 * simply return.
1115 */
1116 if (lle == NULL)
1117 return;
1118 plock = &lle->ll_children[lle->ll_count - 1];
1119 }
1120 }
1121
1122 /*
1123 * Try to perform most checks without a lock. If this succeeds we
1124 * can skip acquiring the lock and return success.
1125 */
1126 w1 = plock->li_lock->lo_witness;
1127 if (witness_lock_order_check(w1, w))
1128 return;
1129
1130 /*
1131 * Check for duplicate locks of the same type. Note that we only
1132 * have to check for this on the last lock we just acquired. Any
1133 * other cases will be caught as lock order violations.
1134 */
1135 mtx_lock_spin(&w_mtx);
1136 witness_lock_order_add(w1, w);
1137 if (w1 == w) {
1138 i = w->w_index;
1139 if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1140 !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1141 w_rmatrix[i][i] |= WITNESS_REVERSAL;
1142 w->w_reversed = 1;
1143 mtx_unlock_spin(&w_mtx);
1144 printf(
1145 "acquiring duplicate lock of same type: \"%s\"\n",
1146 w->w_name);
1147 printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1148 plock->li_file, plock->li_line);
1149 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
1150 witness_debugger(1);
1151 } else
1152 mtx_unlock_spin(&w_mtx);
1153 return;
1154 }
1155 mtx_assert(&w_mtx, MA_OWNED);
1156
1157	/*
1158	 * If we know that the lock we are acquiring comes after
1159	 * the lock we most recently acquired in the lock order tree,
1160	 * then there is no need for any further checks.
1161	 */
1162 if (isitmychild(w1, w))
1163 goto out;
1164
1165 for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1166 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1167
1168 MPASS(j < WITNESS_COUNT);
1169 lock1 = &lle->ll_children[i];
1170
1171 /*
1172 * Ignore the interlock the first time we see it.
1173 */
1174 if (interlock != NULL && interlock == lock1->li_lock) {
1175 interlock = NULL;
1176 continue;
1177 }
1178
1179 /*
1180 * If this lock doesn't undergo witness checking,
1181 * then skip it.
1182 */
1183 w1 = lock1->li_lock->lo_witness;
1184 if (w1 == NULL) {
1185 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1186 ("lock missing witness structure"));
1187 continue;
1188 }
1189
1190 /*
1191 * If we are locking Giant and this is a sleepable
1192 * lock, then skip it.
1193 */
1194 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1195 lock == &Giant.lock_object)
1196 continue;
1197
1198 /*
1199 * If we are locking a sleepable lock and this lock
1200 * is Giant, then skip it.
1201 */
1202 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1203 lock1->li_lock == &Giant.lock_object)
1204 continue;
1205
1206 /*
1207 * If we are locking a sleepable lock and this lock
1208 * isn't sleepable, we want to treat it as a lock
1209			 * order violation to enforce a general lock order of
1210 * sleepable locks before non-sleepable locks.
1211 */
1212 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1213 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1214 goto reversal;
1215
1216 /*
1217 * If we are locking Giant and this is a non-sleepable
1218 * lock, then treat it as a reversal.
1219 */
1220 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1221 lock == &Giant.lock_object)
1222 goto reversal;
1223
1224 /*
1225			 * Check the lock order hierarchy for a reversal.
1226 */
1227 if (!isitmydescendant(w, w1))
1228 continue;
1229 reversal:
1230
1231 /*
1232 * We have a lock order violation, check to see if it
1233 * is allowed or has already been yelled about.
1234 */
1235#ifdef BLESSING
1236
1237 /*
1238 * If the lock order is blessed, just bail. We don't
1239 * look for other lock order violations though, which
1240 * may be a bug.
1241 */
1242 if (blessed(w, w1))
1243 goto out;
1244#endif
1245
1246 /* Bail if this violation is known */
1247 if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1248 goto out;
1249
1250 /* Record this as a violation */
1251 w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1252 w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1253 w->w_reversed = w1->w_reversed = 1;
1254 witness_increment_graph_generation();
1255 mtx_unlock_spin(&w_mtx);
1256
1257 /*
1258 * Ok, yell about it.
1259 */
1260 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1261 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1262 printf(
1263 "lock order reversal: (sleepable after non-sleepable)\n");
1264 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1265 && lock == &Giant.lock_object)
1266 printf(
1267 "lock order reversal: (Giant after non-sleepable)\n");
1268 else
1269 printf("lock order reversal:\n");
1270
1271 /*
1272 * Try to locate an earlier lock with
1273 * witness w in our list.
1274 */
1275 do {
1276 lock2 = &lle->ll_children[i];
1277 MPASS(lock2->li_lock != NULL);
1278 if (lock2->li_lock->lo_witness == w)
1279 break;
1280 if (i == 0 && lle->ll_next != NULL) {
1281 lle = lle->ll_next;
1282 i = lle->ll_count - 1;
1283 MPASS(i >= 0 && i < LOCK_NCHILDREN);
1284 } else
1285 i--;
1286 } while (i >= 0);
1287 if (i < 0) {
1288 printf(" 1st %p %s (%s) @ %s:%d\n",
1289 lock1->li_lock, lock1->li_lock->lo_name,
1290 w1->w_name, lock1->li_file, lock1->li_line);
1291 printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1292 lock->lo_name, w->w_name, file, line);
1293 } else {
1294 printf(" 1st %p %s (%s) @ %s:%d\n",
1295 lock2->li_lock, lock2->li_lock->lo_name,
1296 lock2->li_lock->lo_witness->w_name,
1297 lock2->li_file, lock2->li_line);
1298 printf(" 2nd %p %s (%s) @ %s:%d\n",
1299 lock1->li_lock, lock1->li_lock->lo_name,
1300 w1->w_name, lock1->li_file, lock1->li_line);
1301 printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1302 lock->lo_name, w->w_name, file, line);
1303 }
1304 witness_debugger(1);
1305 return;
1306 }
1307 }
1308
1309 /*
1310 * If requested, build a new lock order. However, don't build a new
1311 * relationship between a sleepable lock and Giant if it is in the
1312 * wrong direction. The correct lock order is that sleepable locks
1313 * always come before Giant.
1314 */
1315 if (flags & LOP_NEWORDER &&
1316 !(plock->li_lock == &Giant.lock_object &&
1317 (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1318 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1319 w->w_name, plock->li_lock->lo_witness->w_name);
1320 itismychild(plock->li_lock->lo_witness, w);
1321 }
1322out:
1323 mtx_unlock_spin(&w_mtx);
1324}
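
/*
 * An illustrative sketch of the two-lock report printed by the code
 * above when a reversal is detected (lock names, pointers, and files
 * are hypothetical):
 *
 *	lock order reversal:
 *	 1st 0xc1234560 foo (foo) @ kern/foo.c:42
 *	 2nd 0xc1234580 bar (bar) @ kern/bar.c:17
 */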
1325
1326void
1327witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1328{
1329 struct lock_list_entry **lock_list, *lle;
1330 struct lock_instance *instance;
1331 struct witness *w;
1332 struct thread *td;
1333
1334 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1335 panicstr != NULL)
1336 return;
1337 w = lock->lo_witness;
1338 td = curthread;
1339 file = fixup_filename(file);
1340
1341 /* Determine lock list for this lock. */
1342 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1343 lock_list = &td->td_sleeplocks;
1344 else
1345 lock_list = PCPU_PTR(spinlocks);
1346
1347 /* Check to see if we are recursing on a lock we already own. */
1348 instance = find_instance(*lock_list, lock);
1349 if (instance != NULL) {
1350 instance->li_flags++;
1351 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1352 td->td_proc->p_pid, lock->lo_name,
1353 instance->li_flags & LI_RECURSEMASK);
1354 instance->li_file = file;
1355 instance->li_line = line;
1356 return;
1357 }
1358
1359 /* Update per-witness last file and line acquire. */
1360 w->w_file = file;
1361 w->w_line = line;
1362
1363 /* Find the next open lock instance in the list and fill it. */
1364 lle = *lock_list;
1365 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1366 lle = witness_lock_list_get();
1367 if (lle == NULL)
1368 return;
1369 lle->ll_next = *lock_list;
1370 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1371 td->td_proc->p_pid, lle);
1372 *lock_list = lle;
1373 }
1374 instance = &lle->ll_children[lle->ll_count++];
1375 instance->li_lock = lock;
1376 instance->li_line = line;
1377 instance->li_file = file;
1378 if ((flags & LOP_EXCLUSIVE) != 0)
1379 instance->li_flags = LI_EXCLUSIVE;
1380 else
1381 instance->li_flags = 0;
1382 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1383 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1384}
1385
1386void
1387witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1388{
1389 struct lock_instance *instance;
1390 struct lock_class *class;
1391
1392 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1393 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1394 return;
1395 class = LOCK_CLASS(lock);
1396 file = fixup_filename(file);
1397 if (witness_watch) {
1398 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1399 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1400 class->lc_name, lock->lo_name, file, line);
1401 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1402 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1403 class->lc_name, lock->lo_name, file, line);
1404 }
1405 instance = find_instance(curthread->td_sleeplocks, lock);
1406 if (instance == NULL)
1407 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1408 class->lc_name, lock->lo_name, file, line);
1409 if (witness_watch) {
1410 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1411 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1412 class->lc_name, lock->lo_name, file, line);
1413 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1414 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1415 class->lc_name, lock->lo_name,
1416 instance->li_flags & LI_RECURSEMASK, file, line);
1417 }
1418 instance->li_flags |= LI_EXCLUSIVE;
1419}
1420
1421void
1422witness_downgrade(struct lock_object *lock, int flags, const char *file,
1423 int line)
1424{
1425 struct lock_instance *instance;
1426 struct lock_class *class;
1427
1428 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1429 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1430 return;
1431 class = LOCK_CLASS(lock);
1432 file = fixup_filename(file);
1433 if (witness_watch) {
1434 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1435 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1436 class->lc_name, lock->lo_name, file, line);
1437 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1438 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1439 class->lc_name, lock->lo_name, file, line);
1440 }
1441 instance = find_instance(curthread->td_sleeplocks, lock);
1442 if (instance == NULL)
1443 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1444 class->lc_name, lock->lo_name, file, line);
1445 if (witness_watch) {
1446 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1447 panic("downgrade of shared lock (%s) %s @ %s:%d",
1448 class->lc_name, lock->lo_name, file, line);
1449 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1450 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1451 class->lc_name, lock->lo_name,
1452 instance->li_flags & LI_RECURSEMASK, file, line);
1453 }
1454 instance->li_flags &= ~LI_EXCLUSIVE;
1455}
1456
1457void
1458witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1459{
1460 struct lock_list_entry **lock_list, *lle;
1461 struct lock_instance *instance;
1462 struct lock_class *class;
1463 struct thread *td;
1464 register_t s;
1465 int i, j;
1466
1467 if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1468 return;
1469 td = curthread;
1470 class = LOCK_CLASS(lock);
1471 file = fixup_filename(file);
1472
1473 /* Find lock instance associated with this lock. */
1474 if (class->lc_flags & LC_SLEEPLOCK)
1475 lock_list = &td->td_sleeplocks;
1476 else
1477 lock_list = PCPU_PTR(spinlocks);
1478 lle = *lock_list;
1479 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1480 for (i = 0; i < (*lock_list)->ll_count; i++) {
1481 instance = &(*lock_list)->ll_children[i];
1482 if (instance->li_lock == lock)
1483 goto found;
1484 }
1485
1486	/*
1487	 * Disabling WITNESS through witness_watch can leave locks still
1488	 * registered in the td_sleeplocks queue.  Those queues must be
1489	 * flushed, so simply search for any such leftover registered
1490	 * locks and remove them.
1491	 */
1492 if (witness_watch > 0)
1493 panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1494 lock->lo_name, file, line);
1495 else
1496 return;
1497found:
1498
1499 /* First, check for shared/exclusive mismatches. */
1500 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1501 (flags & LOP_EXCLUSIVE) == 0) {
1502 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1503 lock->lo_name, file, line);
1504 printf("while exclusively locked from %s:%d\n",
1505 instance->li_file, instance->li_line);
1506 panic("excl->ushare");
1507 }
1508 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1509 (flags & LOP_EXCLUSIVE) != 0) {
1510 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1511 lock->lo_name, file, line);
1512 printf("while share locked from %s:%d\n", instance->li_file,
1513 instance->li_line);
1514 panic("share->uexcl");
1515 }
1516 /* If we are recursed, unrecurse. */
1517 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1518 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1519 td->td_proc->p_pid, instance->li_lock->lo_name,
1520 instance->li_flags);
1521 instance->li_flags--;
1522 return;
1523 }
1524 /* The lock is now being dropped, check for NORELEASE flag */
1525 if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1526 printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1527 lock->lo_name, file, line);
1528 panic("lock marked norelease");
1529 }
1530
1531 /* Otherwise, remove this item from the list. */
1532 s = intr_disable();
1533 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1534 td->td_proc->p_pid, instance->li_lock->lo_name,
1535 (*lock_list)->ll_count - 1);
1536 for (j = i; j < (*lock_list)->ll_count - 1; j++)
1537 (*lock_list)->ll_children[j] =
1538 (*lock_list)->ll_children[j + 1];
1539 (*lock_list)->ll_count--;
1540 intr_restore(s);
1541
1542	/*
1543	 * In order to reduce contention on w_mtx, we want to always keep a
1544	 * head object in each list so that frequent allocation from the
1545	 * free witness pool (and the subsequent locking) is avoided.
1546	 * To keep the code simple, an empty head object also implies that
1547	 * there are no further objects in the list, so list ownership must
1548	 * be handed over to another object if the current head needs to be
1549	 * freed.
1550	 */
1551 if ((*lock_list)->ll_count == 0) {
1552 if (*lock_list == lle) {
1553 if (lle->ll_next == NULL)
1554 return;
1555 } else
1556 lle = *lock_list;
1557 *lock_list = lle->ll_next;
1558 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1559 td->td_proc->p_pid, lle);
1560 witness_lock_list_free(lle);
1561 }
1562}
1563
1564void
1565witness_thread_exit(struct thread *td)
1566{
1567 struct lock_list_entry *lle;
1568 int i, n;
1569
1570 lle = td->td_sleeplocks;
1571 if (lle == NULL || panicstr != NULL)
1572 return;
1573 if (lle->ll_count != 0) {
1574 for (n = 0; lle != NULL; lle = lle->ll_next)
1575 for (i = lle->ll_count - 1; i >= 0; i--) {
1576 if (n == 0)
1577 printf("Thread %p exiting with the following locks held:\n",
1578 td);
1579 n++;
1580 witness_list_lock(&lle->ll_children[i]);
1581
1582 }
1583 panic("Thread %p cannot exit while holding sleeplocks\n", td);
1584 }
1585 witness_lock_list_free(lle);
1586}
1587
1588/*
1589 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1590 * exempt Giant and sleepable locks from the checks as well. If any
1591 * non-exempt locks are held, then a supplied message is printed to the
1592 * console along with a list of the offending locks. If indicated in the
1593 * flags then a failure results in a panic as well.
1594 */
1595int
1596witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1597{
1598 struct lock_list_entry *lock_list, *lle;
1599 struct lock_instance *lock1;
1600 struct thread *td;
1601 va_list ap;
1602 int i, n;
1603
1604 if (witness_cold || witness_watch < 1 || panicstr != NULL)
1605 return (0);
1606 n = 0;
1607 td = curthread;
1608 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1609 for (i = lle->ll_count - 1; i >= 0; i--) {
1610 lock1 = &lle->ll_children[i];
1611 if (lock1->li_lock == lock)
1612 continue;
1613 if (flags & WARN_GIANTOK &&
1614 lock1->li_lock == &Giant.lock_object)
1615 continue;
1616 if (flags & WARN_SLEEPOK &&
1617 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1618 continue;
1619 if (n == 0) {
1620 va_start(ap, fmt);
1621 vprintf(fmt, ap);
1622 va_end(ap);
1623 printf(" with the following");
1624 if (flags & WARN_SLEEPOK)
1625 printf(" non-sleepable");
1626 printf(" locks held:\n");
1627 }
1628 n++;
1629 witness_list_lock(lock1);
1630 }
1631
1632	/*
1633	 * Pin the thread in order to avoid problems with thread migration.
1634	 * Once all the checks on spin lock ownership have passed, the
1635	 * thread is on a safe path and it can be unpinned.
1636	 */
1637 sched_pin();
1638 lock_list = PCPU_GET(spinlocks);
1639 if (lock_list != NULL && lock_list->ll_count != 0) {
1640 sched_unpin();
1641
1642		/*
1643		 * We should only have one spin lock and, since the
1644		 * exemption flags cannot apply to this lock class,
1645		 * simply check whether the first spin lock is the one
1646		 * curthread is expected to hold.
1647		 */
1648 lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1649 if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1650 lock1->li_lock == lock && n == 0)
1651 return (0);
1652
1653 va_start(ap, fmt);
1654 vprintf(fmt, ap);
1655 va_end(ap);
1656 printf(" with the following");
1657 if (flags & WARN_SLEEPOK)
1658 printf(" non-sleepable");
1659 printf(" locks held:\n");
1660 n += witness_list_locks(&lock_list);
1661 } else
1662 sched_unpin();
1663 if (flags & WARN_PANIC && n)
1664 panic("%s", __func__);
1665 else
1666 witness_debugger(n);
1667 return (n);
1668}
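
/*
 * A hedged usage sketch for witness_warn(): callers commonly check for
 * held locks before voluntarily sleeping, along the lines of:
 *
 *	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping on \"%s\"", wmesg);
 *
 * where "wmesg" stands in for the caller's wait-message string and is
 * illustrative, not a reference to a specific call site.
 */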
1669
1670const char *
1671witness_file(struct lock_object *lock)
1672{
1673 struct witness *w;
1674
1675 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1676 return ("?");
1677 w = lock->lo_witness;
1678 return (w->w_file);
1679}
1680
1681int
1682witness_line(struct lock_object *lock)
1683{
1684 struct witness *w;
1685
1686 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1687 return (0);
1688 w = lock->lo_witness;
1689 return (w->w_line);
1690}
1691
1692static struct witness *
1693enroll(const char *description, struct lock_class *lock_class)
1694{
1695 struct witness *w;
1696 struct witness_list *typelist;
1697
1698 MPASS(description != NULL);
1699
1700 if (witness_watch == -1 || panicstr != NULL)
1701 return (NULL);
1702 if ((lock_class->lc_flags & LC_SPINLOCK)) {
1703 if (witness_skipspin)
1704 return (NULL);
1705 else
1706 typelist = &w_spin;
1707 } else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1708 typelist = &w_sleep;
1709 else
1710 panic("lock class %s is not sleep or spin",
1711 lock_class->lc_name);
1712
1713 mtx_lock_spin(&w_mtx);
1714 w = witness_hash_get(description);
1715 if (w)
1716 goto found;
1717 if ((w = witness_get()) == NULL)
1718 return (NULL);
1719 MPASS(strlen(description) < MAX_W_NAME);
1720 strcpy(w->w_name, description);
1721 w->w_class = lock_class;
1722 w->w_refcount = 1;
1723 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1724 if (lock_class->lc_flags & LC_SPINLOCK) {
1725 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1726 w_spin_cnt++;
1727 } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1728 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1729 w_sleep_cnt++;
1730 }
1731
1732 /* Insert new witness into the hash */
1733 witness_hash_put(w);
1734 witness_increment_graph_generation();
1735 mtx_unlock_spin(&w_mtx);
1736 return (w);
1737found:
1738 w->w_refcount++;
1739 mtx_unlock_spin(&w_mtx);
1740 if (lock_class != w->w_class)
1741 panic(
1742 "lock (%s) %s does not match earlier (%s) lock",
1743 description, lock_class->lc_name,
1744 w->w_class->lc_name);
1745 return (w);
1746}
1747
1748static void
1749depart(struct witness *w)
1750{
1751 struct witness_list *list;
1752
1753 MPASS(w->w_refcount == 0);
1754 if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1755 list = &w_sleep;
1756 w_sleep_cnt--;
1757 } else {
1758 list = &w_spin;
1759 w_spin_cnt--;
1760 }
1761 /*
1762 * Set file to NULL as it may point into a loadable module.
1763 */
1764 w->w_file = NULL;
1765 w->w_line = 0;
1766 witness_increment_graph_generation();
1767}
1768
1769
1770static void
1771adopt(struct witness *parent, struct witness *child)
1772{
1773 int pi, ci, i, j;
1774
1775 if (witness_cold == 0)
1776 mtx_assert(&w_mtx, MA_OWNED);
1777
1778 /* If the relationship is already known, there's no work to be done. */
1779 if (isitmychild(parent, child))
1780 return;
1781
1782 /* When the structure of the graph changes, bump up the generation. */
1783 witness_increment_graph_generation();
1784
1785 /*
1786 * The hard part ... create the direct relationship, then propagate all
1787 * indirect relationships.
1788 */
1789 pi = parent->w_index;
1790 ci = child->w_index;
1791 WITNESS_INDEX_ASSERT(pi);
1792 WITNESS_INDEX_ASSERT(ci);
1793 MPASS(pi != ci);
1794 w_rmatrix[pi][ci] |= WITNESS_PARENT;
1795 w_rmatrix[ci][pi] |= WITNESS_CHILD;
1796
1797 /*
1798 * If parent was not already an ancestor of child,
1799 * then we increment the descendant and ancestor counters.
1800 */
1801 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1802 parent->w_num_descendants++;
1803 child->w_num_ancestors++;
1804 }
1805
1806 /*
1807 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1808 * an ancestor of 'pi' during this loop.
1809 */
1810 for (i = 1; i <= w_max_used_index; i++) {
1811 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1812 (i != pi))
1813 continue;
1814
1815 /* Find each descendant of 'i' and mark it as a descendant. */
1816 for (j = 1; j <= w_max_used_index; j++) {
1817
1818 /*
1819 * Skip children that are already marked as
1820 * descendants of 'i'.
1821 */
1822 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1823 continue;
1824
1825 /*
1826 * We are only interested in descendants of 'ci'. Note
1827 * that 'ci' itself is counted as a descendant of 'ci'.
1828 */
1829 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1830 (j != ci))
1831 continue;
1832 w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1833 w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1834 w_data[i].w_num_descendants++;
1835 w_data[j].w_num_ancestors++;
1836
1837 /*
1838 * Make sure we aren't marking a node as both an
1839 * ancestor and descendant. We should have caught
1840 * this as a lock order reversal earlier.
1841 */
1842 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1843 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1844 printf("witness rmatrix paradox! [%d][%d]=%d "
1845 "both ancestor and descendant\n",
1846 i, j, w_rmatrix[i][j]);
1847 kdb_backtrace();
1848 printf("Witness disabled.\n");
1849 witness_watch = -1;
1850 }
1851 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1852 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1853 printf("witness rmatrix paradox! [%d][%d]=%d "
1854 "both ancestor and descendant\n",
1855 j, i, w_rmatrix[j][i]);
1856 kdb_backtrace();
1857 printf("Witness disabled.\n");
1858 witness_watch = -1;
1859 }
1860 }
1861 }
1862}
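
/*
 * A small worked example of the propagation above, using hypothetical
 * witnesses: suppose A is already a known ancestor of B, and D is
 * already a known descendant of C.  After adopt(B, C), the nested loops
 * mark both A and B as ancestors of both C and D, so a later
 * C-before-B (or D-before-A) acquisition is flagged as a reversal.
 */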
1863
1864static void
1865itismychild(struct witness *parent, struct witness *child)
1866{
1867
1868 MPASS(child != NULL && parent != NULL);
1869 if (witness_cold == 0)
1870 mtx_assert(&w_mtx, MA_OWNED);
1871
1872 if (!witness_lock_type_equal(parent, child)) {
1873 if (witness_cold == 0)
1874 mtx_unlock_spin(&w_mtx);
1875 panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1876 "the same lock type", __func__, parent->w_name,
1877 parent->w_class->lc_name, child->w_name,
1878 child->w_class->lc_name);
1879 }
1880 adopt(parent, child);
1881}
1882
1883/*
1884 * Generic code for the isitmy*() functions. The rmask parameter is the
1885 * expected relationship of w1 to w2.
1886 */
1887static int
1888_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1889{
1890 unsigned char r1, r2;
1891 int i1, i2;
1892
1893 i1 = w1->w_index;
1894 i2 = w2->w_index;
1895 WITNESS_INDEX_ASSERT(i1);
1896 WITNESS_INDEX_ASSERT(i2);
1897 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1898 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1899
1900 /* The flags on one better be the inverse of the flags on the other */
1901 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1902 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1903 printf("%s: rmatrix mismatch between %s (index %d) and %s "
1904 "(index %d): w_rmatrix[%d][%d] == %hhx but "
1905 "w_rmatrix[%d][%d] == %hhx\n",
1906 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1907 i2, i1, r2);
1908 kdb_backtrace();
1909 printf("Witness disabled.\n");
1910 witness_watch = -1;
1911 }
1912 return (r1 & rmask);
1913}
1914
1915/*
1916 * Checks if @child is a direct child of @parent.
1917 */
1918static int
1919isitmychild(struct witness *parent, struct witness *child)
1920{
1921
1922 return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1923}
1924
1925/*
1926 * Checks if @descendant is a direct or indirect descendant of @ancestor.
1927 */
1928static int
1929isitmydescendant(struct witness *ancestor, struct witness *descendant)
1930{
1931
1932 return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1933 __func__));
1934}
1935
1936#ifdef BLESSING
1937static int
1938blessed(struct witness *w1, struct witness *w2)
1939{
1940 int i;
1941 struct witness_blessed *b;
1942
1943 for (i = 0; i < blessed_count; i++) {
1944 b = &blessed_list[i];
1945 if (strcmp(w1->w_name, b->b_lock1) == 0) {
1946 if (strcmp(w2->w_name, b->b_lock2) == 0)
1947 return (1);
1948 continue;
1949 }
1950 if (strcmp(w1->w_name, b->b_lock2) == 0)
1951 if (strcmp(w2->w_name, b->b_lock1) == 0)
1952 return (1);
1953 }
1954 return (0);
1955}
1956#endif
1957
1958static struct witness *
1959witness_get(void)
1960{
1961 struct witness *w;
1962 int index;
1963
1964 if (witness_cold == 0)
1965 mtx_assert(&w_mtx, MA_OWNED);
1966
1967 if (witness_watch == -1) {
1968 mtx_unlock_spin(&w_mtx);
1969 return (NULL);
1970 }
1971 if (STAILQ_EMPTY(&w_free)) {
1972 witness_watch = -1;
1973 mtx_unlock_spin(&w_mtx);
1974 printf("WITNESS: unable to allocate a new witness object\n");
1975 return (NULL);
1976 }
1977 w = STAILQ_FIRST(&w_free);
1978 STAILQ_REMOVE_HEAD(&w_free, w_list);
1979 w_free_cnt--;
1980 index = w->w_index;
1981 MPASS(index > 0 && index == w_max_used_index+1 &&
1982 index < WITNESS_COUNT);
1983 bzero(w, sizeof(*w));
1984 w->w_index = index;
1985 if (index > w_max_used_index)
1986 w_max_used_index = index;
1987 return (w);
1988}
1989
1990static void
1991witness_free(struct witness *w)
1992{
1993
1994 STAILQ_INSERT_HEAD(&w_free, w, w_list);
1995 w_free_cnt++;
1996}
1997
1998static struct lock_list_entry *
1999witness_lock_list_get(void)
2000{
2001 struct lock_list_entry *lle;
2002
2003 if (witness_watch == -1)
2004 return (NULL);
2005 mtx_lock_spin(&w_mtx);
2006 lle = w_lock_list_free;
2007 if (lle == NULL) {
2008 witness_watch = -1;
2009 mtx_unlock_spin(&w_mtx);
2010 printf("%s: witness exhausted\n", __func__);
2011 return (NULL);
2012 }
2013 w_lock_list_free = lle->ll_next;
2014 mtx_unlock_spin(&w_mtx);
2015 bzero(lle, sizeof(*lle));
2016 return (lle);
2017}
2018
2019static void
2020witness_lock_list_free(struct lock_list_entry *lle)
2021{
2022
2023 mtx_lock_spin(&w_mtx);
2024 lle->ll_next = w_lock_list_free;
2025 w_lock_list_free = lle;
2026 mtx_unlock_spin(&w_mtx);
2027}
2028
2029static struct lock_instance *
2030find_instance(struct lock_list_entry *list, struct lock_object *lock)
2031{
2032 struct lock_list_entry *lle;
2033 struct lock_instance *instance;
2034 int i;
2035
2036 for (lle = list; lle != NULL; lle = lle->ll_next)
2037 for (i = lle->ll_count - 1; i >= 0; i--) {
2038 instance = &lle->ll_children[i];
2039 if (instance->li_lock == lock)
2040 return (instance);
2041 }
2042 return (NULL);
2043}
2044
2045static void
2046witness_list_lock(struct lock_instance *instance)
2047{
2048 struct lock_object *lock;
2049
2050 lock = instance->li_lock;
2051 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2052 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2053 if (lock->lo_witness->w_name != lock->lo_name)
2054 printf(" (%s)", lock->lo_witness->w_name);
2055 printf(" r = %d (%p) locked @ %s:%d\n",
2056 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
2057 instance->li_line);
2058}
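
/*
 * An illustrative sample of the line printed above (the name, pointer,
 * and file are hypothetical):
 *
 *	exclusive sleep mutex foo (foo witness) r = 0 (0xc1234567) locked @ kern/foo.c:42
 *
 * Note that the parenthesized witness name is emitted only when it
 * differs from the lock's own name.
 */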
2059
2060#ifdef DDB
2061static int
2062witness_thread_has_locks(struct thread *td)
2063{
2064
2065 if (td->td_sleeplocks == NULL)
2066 return (0);
2067 return (td->td_sleeplocks->ll_count != 0);
2068}
2069
2070static int
2071witness_proc_has_locks(struct proc *p)
2072{
2073 struct thread *td;
2074
2075 FOREACH_THREAD_IN_PROC(p, td) {
2076 if (witness_thread_has_locks(td))
2077 return (1);
2078 }
2079 return (0);
2080}
2081#endif
2082
2083int
2084witness_list_locks(struct lock_list_entry **lock_list)
2085{
2086 struct lock_list_entry *lle;
2087 int i, nheld;
2088
2089 nheld = 0;
2090 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2091 for (i = lle->ll_count - 1; i >= 0; i--) {
2092 witness_list_lock(&lle->ll_children[i]);
2093 nheld++;
2094 }
2095 return (nheld);
2096}
2097
2098/*
2099 * This is a bit risky at best. We call this function when we have timed
2100 * out acquiring a spin lock, and we assume that the other CPU is stuck
2101 * with this lock held. So, we go groveling around in the other CPU's
2102 * per-cpu data to try to find the lock instance for this spin lock to
2103 * see when it was last acquired.
2104 */
2105void
2106witness_display_spinlock(struct lock_object *lock, struct thread *owner)
2107{
2108 struct lock_instance *instance;
2109 struct pcpu *pc;
2110
2111 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2112 return;
2113 pc = pcpu_find(owner->td_oncpu);
2114 instance = find_instance(pc->pc_spinlocks, lock);
2115 if (instance != NULL)
2116 witness_list_lock(instance);
2117}
2118
2119void
2120witness_save(struct lock_object *lock, const char **filep, int *linep)
2121{
2122 struct lock_list_entry *lock_list;
2123 struct lock_instance *instance;
2124 struct lock_class *class;
2125
2126 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2127 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2128 return;
2129 class = LOCK_CLASS(lock);
2130 if (class->lc_flags & LC_SLEEPLOCK)
2131 lock_list = curthread->td_sleeplocks;
2132 else {
2133 if (witness_skipspin)
2134 return;
2135 lock_list = PCPU_GET(spinlocks);
2136 }
2137 instance = find_instance(lock_list, lock);
2138 if (instance == NULL)
2139 panic("%s: lock (%s) %s not locked", __func__,
2140 class->lc_name, lock->lo_name);
2141 *filep = instance->li_file;
2142 *linep = instance->li_line;
2143}
2144
2145void
2146witness_restore(struct lock_object *lock, const char *file, int line)
2147{
2148 struct lock_list_entry *lock_list;
2149 struct lock_instance *instance;
2150 struct lock_class *class;
2151
2152 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2153 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2154 return;
2155 class = LOCK_CLASS(lock);
2156 if (class->lc_flags & LC_SLEEPLOCK)
2157 lock_list = curthread->td_sleeplocks;
2158 else {
2159 if (witness_skipspin)
2160 return;
2161 lock_list = PCPU_GET(spinlocks);
2162 }
2163 instance = find_instance(lock_list, lock);
2164 if (instance == NULL)
2165 panic("%s: lock (%s) %s not locked", __func__,
2166 class->lc_name, lock->lo_name);
2167 lock->lo_witness->w_file = file;
2168 lock->lo_witness->w_line = line;
2169 instance->li_file = file;
2170 instance->li_line = line;
2171}
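
/*
 * A hedged sketch of the save/restore pair: a primitive that must
 * temporarily drop and reacquire a lock can preserve the original
 * file/line attribution across the gap.  "lo" is an illustrative
 * lock_object pointer, not a specific consumer:
 *
 *	witness_save(lo, &file, &line);
 *	... drop the lock, sleep, reacquire the lock ...
 *	witness_restore(lo, file, line);
 */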
2172
2173void
2174witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2175{
2176#ifdef INVARIANT_SUPPORT
2177 struct lock_instance *instance;
2178 struct lock_class *class;
2179
2180 if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2181 return;
2182 class = LOCK_CLASS(lock);
2183 if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2184 instance = find_instance(curthread->td_sleeplocks, lock);
2185 else if ((class->lc_flags & LC_SPINLOCK) != 0)
2186 instance = find_instance(PCPU_GET(spinlocks), lock);
2187 else {
2188 panic("Lock (%s) %s is not sleep or spin!",
2189 class->lc_name, lock->lo_name);
2190 }
2191 file = fixup_filename(file);
2192 switch (flags) {
2193 case LA_UNLOCKED:
2194 if (instance != NULL)
2195 panic("Lock (%s) %s locked @ %s:%d.",
2196 class->lc_name, lock->lo_name, file, line);
2197 break;
2198 case LA_LOCKED:
2199 case LA_LOCKED | LA_RECURSED:
2200 case LA_LOCKED | LA_NOTRECURSED:
2201 case LA_SLOCKED:
2202 case LA_SLOCKED | LA_RECURSED:
2203 case LA_SLOCKED | LA_NOTRECURSED:
2204 case LA_XLOCKED:
2205 case LA_XLOCKED | LA_RECURSED:
2206 case LA_XLOCKED | LA_NOTRECURSED:
2207 if (instance == NULL) {
2208 panic("Lock (%s) %s not locked @ %s:%d.",
2209 class->lc_name, lock->lo_name, file, line);
2210 break;
2211 }
2212 if ((flags & LA_XLOCKED) != 0 &&
2213 (instance->li_flags & LI_EXCLUSIVE) == 0)
2214 panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2215 class->lc_name, lock->lo_name, file, line);
2216 if ((flags & LA_SLOCKED) != 0 &&
2217 (instance->li_flags & LI_EXCLUSIVE) != 0)
2218 panic("Lock (%s) %s exclusively locked @ %s:%d.",
2219 class->lc_name, lock->lo_name, file, line);
2220 if ((flags & LA_RECURSED) != 0 &&
2221 (instance->li_flags & LI_RECURSEMASK) == 0)
2222 panic("Lock (%s) %s not recursed @ %s:%d.",
2223 class->lc_name, lock->lo_name, file, line);
2224 if ((flags & LA_NOTRECURSED) != 0 &&
2225 (instance->li_flags & LI_RECURSEMASK) != 0)
2226 panic("Lock (%s) %s recursed @ %s:%d.",
2227 class->lc_name, lock->lo_name, file, line);
2228 break;
2229 default:
2230 panic("Invalid lock assertion at %s:%d.", file, line);
2231
2232 }
2233#endif /* INVARIANT_SUPPORT */
2234}
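
/*
 * An assumed, illustrative use of witness_assert() is a lock
 * implementation's assertion path, e.g. verifying exclusive ownership
 * of a hypothetical lock_object pointer "lo":
 *
 *	witness_assert(lo, LA_XLOCKED, file, line);
 */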
2235
2236static void
2237witness_setflag(struct lock_object *lock, int flag, int set)
2238{
2239 struct lock_list_entry *lock_list;
2240 struct lock_instance *instance;
2241 struct lock_class *class;
2242
2243 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2244 return;
2245 class = LOCK_CLASS(lock);
2246 if (class->lc_flags & LC_SLEEPLOCK)
2247 lock_list = curthread->td_sleeplocks;
2248 else {
2249 if (witness_skipspin)
2250 return;
2251 lock_list = PCPU_GET(spinlocks);
2252 }
2253 instance = find_instance(lock_list, lock);
2254 if (instance == NULL)
2255 panic("%s: lock (%s) %s not locked", __func__,
2256 class->lc_name, lock->lo_name);
2257
2258 if (set)
2259 instance->li_flags |= flag;
2260 else
2261 instance->li_flags &= ~flag;
2262}
2263
2264void
2265witness_norelease(struct lock_object *lock)
2266{
2267
2268 witness_setflag(lock, LI_NORELEASE, 1);
2269}
2270
2271void
2272witness_releaseok(struct lock_object *lock)
2273{
2274
2275 witness_setflag(lock, LI_NORELEASE, 0);
2276}
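
/*
 * A hypothetical sketch of the norelease/releaseok pair bracketing a
 * region in which a held lock must not be dropped ("lo" is an
 * illustrative lock_object pointer):
 *
 *	witness_norelease(lo);
 *	... code that must keep the lock held ...
 *	witness_releaseok(lo);
 *
 * An unlock inside the bracketed region panics with "lock marked
 * norelease" (see witness_unlock() above).
 */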
2277
2278#ifdef DDB
2279static void
2280witness_ddb_list(struct thread *td)
2281{
2282
2283 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2284 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2285
2286 if (witness_watch < 1)
2287 return;
2288
2289 witness_list_locks(&td->td_sleeplocks);
2290
2291 /*
2292	 * We only handle spinlocks if td == curthread.  This is somewhat broken
2293	 * if td is currently executing on some other CPU and holds spin locks,
2294	 * since we won't display those locks.  If we had an MI way of getting
2295	 * the per-CPU data for a given CPU, then we could use
2296	 * td->td_oncpu to get the list of spin locks for this thread
2297	 * and "fix" this.
2298 *
2299 * That still wouldn't really fix this unless we locked the scheduler
2300 * lock or stopped the other CPU to make sure it wasn't changing the
2301 * list out from under us. It is probably best to just not try to
2302 * handle threads on other CPU's for now.
2303 */
2304 if (td == curthread && PCPU_GET(spinlocks) != NULL)
2305 witness_list_locks(PCPU_PTR(spinlocks));
2306}
2307
2308DB_SHOW_COMMAND(locks, db_witness_list)
2309{
2310 struct thread *td;
2311
2312 if (have_addr)
2313 td = db_lookup_thread(addr, TRUE);
2314 else
2315 td = kdb_thread;
2316 witness_ddb_list(td);
2317}
2318
2319DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2320{
2321 struct thread *td;
2322 struct proc *p;
2323
2324 /*
2325 * It would be nice to list only threads and processes that actually
2326 * held sleep locks, but that information is currently not exported
2327 * by WITNESS.
2328 */
2329 FOREACH_PROC_IN_SYSTEM(p) {
2330 if (!witness_proc_has_locks(p))
2331 continue;
2332 FOREACH_THREAD_IN_PROC(p, td) {
2333 if (!witness_thread_has_locks(td))
2334 continue;
2335 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2336 p->p_comm, td, td->td_tid);
2337 witness_ddb_list(td);
2338 }
2339 }
2340}
2341DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2342
2343DB_SHOW_COMMAND(witness, db_witness_display)
2344{
2345
2346 witness_ddb_display(db_printf);
2347}
2348#endif
2349
2350static int
2351sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2352{
2353 struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2354 struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2355 struct sbuf *sb;
2356 u_int w_rmatrix1, w_rmatrix2;
2357 int error, generation, i, j;
2358
2359 tmp_data1 = NULL;
2360 tmp_data2 = NULL;
2361 tmp_w1 = NULL;
2362 tmp_w2 = NULL;
2363 if (witness_watch < 1) {
2364 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2365 return (error);
2366 }
2367 if (witness_cold) {
2368 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2369 return (error);
2370 }
2371 error = 0;
2372 sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2373 if (sb == NULL)
2374 return (ENOMEM);
2375
2376 /* Allocate and init temporary storage space. */
2377 tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2378 tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2379 tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2380 M_WAITOK | M_ZERO);
2381 tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2382 M_WAITOK | M_ZERO);
2383 stack_zero(&tmp_data1->wlod_stack);
2384 stack_zero(&tmp_data2->wlod_stack);
2385
2386restart:
2387 mtx_lock_spin(&w_mtx);
2388 generation = w_generation;
2389 mtx_unlock_spin(&w_mtx);
2390 sbuf_printf(sb, "Number of known direct relationships is %d\n",
2391 w_lohash.wloh_count);
2392 for (i = 1; i < w_max_used_index; i++) {
2393 mtx_lock_spin(&w_mtx);
2394 if (generation != w_generation) {
2395 mtx_unlock_spin(&w_mtx);
2396
2397 /* The graph has changed, try again. */
2398 req->oldidx = 0;
2399 sbuf_clear(sb);
2400 goto restart;
2401 }
2402
2403 w1 = &w_data[i];
2404 if (w1->w_reversed == 0) {
2405 mtx_unlock_spin(&w_mtx);
2406 continue;
2407 }
2408
2409 /* Copy w1 locally so we can release the spin lock. */
2410 *tmp_w1 = *w1;
2411 mtx_unlock_spin(&w_mtx);
2412
2413 if (tmp_w1->w_reversed == 0)
2414 continue;
2415 for (j = 1; j < w_max_used_index; j++) {
2416 if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2417 continue;
2418
2419 mtx_lock_spin(&w_mtx);
2420 if (generation != w_generation) {
2421 mtx_unlock_spin(&w_mtx);
2422
2423 /* The graph has changed, try again. */
2424 req->oldidx = 0;
2425 sbuf_clear(sb);
2426 goto restart;
2427 }
2428
2429 w2 = &w_data[j];
2430 data1 = witness_lock_order_get(w1, w2);
2431 data2 = witness_lock_order_get(w2, w1);
2432
2433 /*
2434 * Copy information locally so we can release the
2435 * spin lock.
2436 */
2437 *tmp_w2 = *w2;
2438 w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2439 w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2440
2441 if (data1) {
2442 stack_zero(&tmp_data1->wlod_stack);
2443 stack_copy(&data1->wlod_stack,
2444 &tmp_data1->wlod_stack);
2445 }
2446 if (data2 && data2 != data1) {
2447 stack_zero(&tmp_data2->wlod_stack);
2448 stack_copy(&data2->wlod_stack,
2449 &tmp_data2->wlod_stack);
2450 }
2451 mtx_unlock_spin(&w_mtx);
2452
2453 sbuf_printf(sb,
2454 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2455 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2456 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2457#if 0
2458 sbuf_printf(sb,
2459 "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2460			    tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2461			    tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2462#endif
2463 if (data1) {
2464 sbuf_printf(sb,
2465 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2466 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2467 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2468 stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2469 sbuf_printf(sb, "\n");
2470 }
2471 if (data2 && data2 != data1) {
2472 sbuf_printf(sb,
2473 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2474 tmp_w2->w_name, tmp_w2->w_class->lc_name,
2475 tmp_w1->w_name, tmp_w1->w_class->lc_name);
2476 stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2477 sbuf_printf(sb, "\n");
2478 }
2479 }
2480 }
2481 mtx_lock_spin(&w_mtx);
2482 if (generation != w_generation) {
2483 mtx_unlock_spin(&w_mtx);
2484
2485 /*
2486 * The graph changed while we were printing stack data,
2487 * try again.
2488 */
2489 req->oldidx = 0;
2490 sbuf_clear(sb);
2491 goto restart;
2492 }
2493 mtx_unlock_spin(&w_mtx);
2494
2495 /* Free temporary storage space. */
2496 free(tmp_data1, M_TEMP);
2497 free(tmp_data2, M_TEMP);
2498 free(tmp_w1, M_TEMP);
2499 free(tmp_w2, M_TEMP);
2500
2501 sbuf_finish(sb);
2502 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2503 sbuf_delete(sb);
2504
2505 return (error);
2506}
2507
2508static int
2509sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2510{
2511 struct witness *w;
2512 struct sbuf *sb;
2513 int error;
2514
2515 if (witness_watch < 1) {
2516 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2517 return (error);
2518 }
2519 if (witness_cold) {
2520 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2521 return (error);
2522 }
2523 error = 0;
2524 sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
2525 if (sb == NULL)
2526 return (ENOMEM);
2527 sbuf_printf(sb, "\n");
2528
2529 mtx_lock_spin(&w_mtx);
2530 STAILQ_FOREACH(w, &w_all, w_list)
2531 w->w_displayed = 0;
2532 STAILQ_FOREACH(w, &w_all, w_list)
2533 witness_add_fullgraph(sb, w);
2534 mtx_unlock_spin(&w_mtx);
2535
2536 /*
2537 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
2538 */
2539 if (sbuf_overflowed(sb)) {
2540 sbuf_delete(sb);
2541 panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
2542 __func__);
2543 }
2544
2545 /*
2546 * Close the sbuf and return to userland.
2547 */
2548 sbuf_finish(sb);
2549 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2550 sbuf_delete(sb);
2551
2552 return (error);
2553}
2554
2555static int
2556sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2557{
2558 int error, value;
2559
2560 value = witness_watch;
2561 error = sysctl_handle_int(oidp, &value, 0, req);
2562 if (error != 0 || req->newptr == NULL)
2563 return (error);
2564 if (value > 1 || value < -1 ||
2565 (witness_watch == -1 && value != witness_watch))
2566 return (EINVAL);
2567 witness_watch = value;
2568 return (0);
2569}
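
/*
 * Illustrative runtime usage of the handler above, per its checks:
 * setting the value to 0 suppresses order checking, while setting it
 * to -1 disables witness entirely; once at -1, any attempt to set a
 * different value is rejected with EINVAL.  Assuming the usual
 * debug.witness.watch oid, e.g.:
 *
 *	sysctl debug.witness.watch=-1
 */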
2570
2571static void
2572witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2573{
2574 int i;
2575
2576 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2577 return;
2578 w->w_displayed = 1;
2579
2580 WITNESS_INDEX_ASSERT(w->w_index);
2581 for (i = 1; i <= w_max_used_index; i++) {
2582 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2583 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2584 w_data[i].w_name);
2585 witness_add_fullgraph(sb, &w_data[i]);
2586 }
2587 }
2588}
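
/*
 * The graph emitted above is a flat list of "parent","child" edges,
 * one per line, e.g. (hypothetical witness names):
 *
 *	"foo","bar"
 *	"bar","baz"
 *
 * It is retrieved at runtime through the sysctl_debug_witness_fullgraph
 * handler above.
 */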
2589
2590/*
2591 * A simple hash function.  Takes a key pointer and a key size.  If size
2592 * == 0, interprets the key as a string and reads until the null
2593 * terminator.  Otherwise, reads the first size bytes.  Returns an
2594 * unsigned 32-bit hash value computed from the key.
2595 */
2596static uint32_t
2597witness_hash_djb2(const uint8_t *key, uint32_t size)
2598{
2599 unsigned int hash = 5381;
2600 int i;
2601
2602 /* hash = hash * 33 + key[i] */
2603 if (size)
2604 for (i = 0; i < size; i++)
2605 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2606 else
2607 for (i = 0; key[i] != 0; i++)
2608 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2609
2610 return (hash);
2611}
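
/*
 * A short worked example of the djb2 iteration above for the two-byte
 * string "ab" ('a' == 97, 'b' == 98):
 *
 *	hash = 5381
 *	hash = 5381 * 33 + 97   = 177670
 *	hash = 177670 * 33 + 98 = 5863208
 */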
2612
2613
2614/*
2615 * Initializes the two witness hash tables. Called exactly once from
2616 * witness_initialize().
2617 */
2618static void
2619witness_init_hash_tables(void)
2620{
2621 int i;
2622
2623 MPASS(witness_cold);
2624
2625 /* Initialize the hash tables. */
2626 for (i = 0; i < WITNESS_HASH_SIZE; i++)
2627 w_hash.wh_array[i] = NULL;
2628
2629 w_hash.wh_size = WITNESS_HASH_SIZE;
2630 w_hash.wh_count = 0;
2631
2632 /* Initialize the lock order data hash. */
2633 w_lofree = NULL;
2634 for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2635 memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2636 w_lodata[i].wlod_next = w_lofree;
2637 w_lofree = &w_lodata[i];
2638 }
2639 w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2640 w_lohash.wloh_count = 0;
2641 for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2642 w_lohash.wloh_array[i] = NULL;
2643}
2644
2645static struct witness *
2646witness_hash_get(const char *key)
2647{
2648 struct witness *w;
2649 uint32_t hash;
2650
2651 MPASS(key != NULL);
2652 if (witness_cold == 0)
2653 mtx_assert(&w_mtx, MA_OWNED);
2654 hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2655 w = w_hash.wh_array[hash];
2656 while (w != NULL) {
2657 if (strcmp(w->w_name, key) == 0)
2658 goto out;
2659 w = w->w_hash_next;
2660 }
2661
2662out:
2663 return (w);
2664}
2665
2666static void
2667witness_hash_put(struct witness *w)
2668{
2669 uint32_t hash;
2670
2671 MPASS(w != NULL);
2672 MPASS(w->w_name != NULL);
2673 if (witness_cold == 0)
2674 mtx_assert(&w_mtx, MA_OWNED);
2675 KASSERT(witness_hash_get(w->w_name) == NULL,
2676 ("%s: trying to add a hash entry that already exists!", __func__));
2677 KASSERT(w->w_hash_next == NULL,
2678 ("%s: w->w_hash_next != NULL", __func__));
2679
2680 hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2681 w->w_hash_next = w_hash.wh_array[hash];
2682 w_hash.wh_array[hash] = w;
2683 w_hash.wh_count++;
2684}
2685
2686
2687static struct witness_lock_order_data *
2688witness_lock_order_get(struct witness *parent, struct witness *child)
2689{
2690 struct witness_lock_order_data *data = NULL;
2691 struct witness_lock_order_key key;
2692 unsigned int hash;
2693
2694 MPASS(parent != NULL && child != NULL);
2695 key.from = parent->w_index;
2696 key.to = child->w_index;
2697 WITNESS_INDEX_ASSERT(key.from);
2698 WITNESS_INDEX_ASSERT(key.to);
2699 if ((w_rmatrix[parent->w_index][child->w_index]
2700 & WITNESS_LOCK_ORDER_KNOWN) == 0)
2701 goto out;
2702
2703 hash = witness_hash_djb2((const char*)&key,
2704 sizeof(key)) % w_lohash.wloh_size;
2705 data = w_lohash.wloh_array[hash];
2706 while (data != NULL) {
2707 if (witness_lock_order_key_equal(&data->wlod_key, &key))
2708 break;
2709 data = data->wlod_next;
2710 }
2711
2712out:
2713 return (data);
2714}
2715
2716/*
2717 * Verify that parent and child have a known relationship, are not the same,
2718 * and child is actually a child of parent. This is done without w_mtx
2719 * to avoid contention in the common case.
2720 */
2721static int
2722witness_lock_order_check(struct witness *parent, struct witness *child)
2723{
2724
2725 if (parent != child &&
2726 w_rmatrix[parent->w_index][child->w_index]
2727 & WITNESS_LOCK_ORDER_KNOWN &&
2728 isitmychild(parent, child))
2729 return (1);
2730
2731 return (0);
2732}
2733
2734static int
2735witness_lock_order_add(struct witness *parent, struct witness *child)
2736{
2737 struct witness_lock_order_data *data = NULL;
2738 struct witness_lock_order_key key;
2739 unsigned int hash;
2740
2741 MPASS(parent != NULL && child != NULL);
2742 key.from = parent->w_index;
2743 key.to = child->w_index;
2744 WITNESS_INDEX_ASSERT(key.from);
2745 WITNESS_INDEX_ASSERT(key.to);
2746 if (w_rmatrix[parent->w_index][child->w_index]
2747 & WITNESS_LOCK_ORDER_KNOWN)
2748 return (1);
2749
2750 hash = witness_hash_djb2((const char*)&key,
2751 sizeof(key)) % w_lohash.wloh_size;
2752 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2753 data = w_lofree;
2754 if (data == NULL)
2755 return (0);
2756 w_lofree = data->wlod_next;
2757 data->wlod_next = w_lohash.wloh_array[hash];
2758 data->wlod_key = key;
2759 w_lohash.wloh_array[hash] = data;
2760 w_lohash.wloh_count++;
2761 stack_zero(&data->wlod_stack);
2762 stack_save(&data->wlod_stack);
2763 return (1);
2764}
2765
2766/* Call this whenever the structure of the witness graph changes. */
2767static void
2768witness_increment_graph_generation(void)
2769{
2770
2771 if (witness_cold == 0)
2772 mtx_assert(&w_mtx, MA_OWNED);
2773 w_generation++;
2774}
2775
2776#ifdef KDB
2777static void
2778_witness_debugger(int cond, const char *msg)
2779{
2780
2781 if (witness_trace && cond)
2782 kdb_backtrace();
2783 if (witness_kdb && cond)
2784 kdb_enter(KDB_WHY_WITNESS, msg);
2785}
2786#endif
533 * UNIX Domain Sockets
534 */
535 { "unp_global_rwlock", &lock_class_rw },
536 { "unp_list_lock", &lock_class_mtx_sleep },
537 { "unp", &lock_class_mtx_sleep },
538 { "so_snd", &lock_class_mtx_sleep },
539 { NULL, NULL },
540 /*
541 * UDP/IP
542 */
543 { "udp", &lock_class_rw },
544 { "udpinp", &lock_class_rw },
545 { "so_snd", &lock_class_mtx_sleep },
546 { NULL, NULL },
547 /*
548 * TCP/IP
549 */
550 { "tcp", &lock_class_rw },
551 { "tcpinp", &lock_class_rw },
552 { "so_snd", &lock_class_mtx_sleep },
553 { NULL, NULL },
554 /*
555 * SLIP
556 */
557 { "slip_mtx", &lock_class_mtx_sleep },
558 { "slip sc_mtx", &lock_class_mtx_sleep },
559 { NULL, NULL },
560 /*
561 * netatalk
562 */
563 { "ddp_list_mtx", &lock_class_mtx_sleep },
564 { "ddp_mtx", &lock_class_mtx_sleep },
565 { NULL, NULL },
566 /*
567 * BPF
568 */
569 { "bpf global lock", &lock_class_mtx_sleep },
570 { "bpf interface lock", &lock_class_mtx_sleep },
571 { "bpf cdev lock", &lock_class_mtx_sleep },
572 { NULL, NULL },
573 /*
574 * NFS server
575 */
576 { "nfsd_mtx", &lock_class_mtx_sleep },
577 { "so_snd", &lock_class_mtx_sleep },
578 { NULL, NULL },
579
580 /*
581 * IEEE 802.11
582 */
583 { "802.11 com lock", &lock_class_mtx_sleep},
584 { NULL, NULL },
585 /*
586 * Network drivers
587 */
588 { "network driver", &lock_class_mtx_sleep},
589 { NULL, NULL },
590
591 /*
592 * Netgraph
593 */
594 { "ng_node", &lock_class_mtx_sleep },
595 { "ng_worklist", &lock_class_mtx_sleep },
596 { NULL, NULL },
597 /*
598 * CDEV
599 */
600 { "system map", &lock_class_mtx_sleep },
601 { "vm page queue mutex", &lock_class_mtx_sleep },
602 { "vnode interlock", &lock_class_mtx_sleep },
603 { "cdev", &lock_class_mtx_sleep },
604 { NULL, NULL },
605 /*
606 * kqueue/VFS interaction
607 */
608 { "kqueue", &lock_class_mtx_sleep },
609 { "struct mount mtx", &lock_class_mtx_sleep },
610 { "vnode interlock", &lock_class_mtx_sleep },
611 { NULL, NULL },
612 /*
613 * spin locks
614 */
615#ifdef SMP
616 { "ap boot", &lock_class_mtx_spin },
617#endif
618 { "rm.mutex_mtx", &lock_class_mtx_spin },
619 { "sio", &lock_class_mtx_spin },
620 { "scrlock", &lock_class_mtx_spin },
621#ifdef __i386__
622 { "cy", &lock_class_mtx_spin },
623#endif
624#ifdef __sparc64__
625 { "pcib_mtx", &lock_class_mtx_spin },
626 { "rtc_mtx", &lock_class_mtx_spin },
627#endif
628 { "scc_hwmtx", &lock_class_mtx_spin },
629 { "uart_hwmtx", &lock_class_mtx_spin },
630 { "fast_taskqueue", &lock_class_mtx_spin },
631 { "intr table", &lock_class_mtx_spin },
632#ifdef HWPMC_HOOKS
633 { "pmc-per-proc", &lock_class_mtx_spin },
634#endif
635 { "process slock", &lock_class_mtx_spin },
636 { "sleepq chain", &lock_class_mtx_spin },
637 { "umtx lock", &lock_class_mtx_spin },
638 { "rm_spinlock", &lock_class_mtx_spin },
639 { "turnstile chain", &lock_class_mtx_spin },
640 { "turnstile lock", &lock_class_mtx_spin },
641 { "sched lock", &lock_class_mtx_spin },
642 { "td_contested", &lock_class_mtx_spin },
643 { "callout", &lock_class_mtx_spin },
644 { "entropy harvest mutex", &lock_class_mtx_spin },
645 { "syscons video lock", &lock_class_mtx_spin },
646 { "time lock", &lock_class_mtx_spin },
647#ifdef SMP
648 { "smp rendezvous", &lock_class_mtx_spin },
649#endif
650#ifdef __powerpc__
651 { "tlb0", &lock_class_mtx_spin },
652#endif
653 /*
654 * leaf locks
655 */
656 { "intrcnt", &lock_class_mtx_spin },
657 { "icu", &lock_class_mtx_spin },
658#if defined(SMP) && defined(__sparc64__)
659 { "ipi", &lock_class_mtx_spin },
660#endif
661#ifdef __i386__
662 { "allpmaps", &lock_class_mtx_spin },
663 { "descriptor tables", &lock_class_mtx_spin },
664#endif
665 { "clk", &lock_class_mtx_spin },
666 { "cpuset", &lock_class_mtx_spin },
667 { "mprof lock", &lock_class_mtx_spin },
668 { "zombie lock", &lock_class_mtx_spin },
669 { "ALD Queue", &lock_class_mtx_spin },
670#ifdef __ia64__
671 { "MCA spin lock", &lock_class_mtx_spin },
672#endif
673#if defined(__i386__) || defined(__amd64__)
674 { "pcicfg", &lock_class_mtx_spin },
675 { "NDIS thread lock", &lock_class_mtx_spin },
676#endif
677 { "tw_osl_io_lock", &lock_class_mtx_spin },
678 { "tw_osl_q_lock", &lock_class_mtx_spin },
679 { "tw_cl_io_lock", &lock_class_mtx_spin },
680 { "tw_cl_intr_lock", &lock_class_mtx_spin },
681 { "tw_cl_gen_lock", &lock_class_mtx_spin },
682#ifdef HWPMC_HOOKS
683 { "pmc-leaf", &lock_class_mtx_spin },
684#endif
685 { "blocked lock", &lock_class_mtx_spin },
686 { NULL, NULL },
687 { NULL, NULL }
688};
689
690#ifdef BLESSING
691/*
692 * Pairs of locks which have been blessed
693 * Don't complain about order problems with blessed locks
694 */
695static struct witness_blessed blessed_list[] = {
696};
697static int blessed_count =
698 sizeof(blessed_list) / sizeof(struct witness_blessed);
699#endif
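
/*
 * A hypothetical example entry for blessed_list above would be:
 *
 *	{ "lockA", "lockB" },
 *
 * which makes blessed() return 1 for that pair in either order, so the
 * two witnesses are exempt from lock order complaints (the names are
 * illustrative only).
 */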
700
701/*
702 * This global is set to 0 once it becomes safe to use the witness code.
703 */
704static int witness_cold = 1;
705
706/*
707 * This global is set to 1 once the static lock orders have been enrolled
708 * so that a warning can be issued for any spin locks enrolled later.
709 */
710static int witness_spin_warn = 0;
711
712/*
713 * The WITNESS-enabled diagnostic code.  Note that the witness code
714 * assumes that early boot is single-threaded, at least until after this
715 * routine is completed.
716 */
717static void
718witness_initialize(void *dummy __unused)
719{
720 struct lock_object *lock;
721 struct witness_order_list_entry *order;
722 struct witness *w, *w1;
723 int i;
724
725 w_data = malloc(sizeof (struct witness) * WITNESS_COUNT, M_WITNESS,
726 M_NOWAIT | M_ZERO);
727
728 /*
729 * We have to release Giant before initializing its witness
730 * structure so that WITNESS doesn't get confused.
731 */
732 mtx_unlock(&Giant);
733 mtx_assert(&Giant, MA_NOTOWNED);
734
735 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
736 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
737 MTX_NOWITNESS | MTX_NOPROFILE);
738 for (i = WITNESS_COUNT - 1; i >= 0; i--) {
739 w = &w_data[i];
740 memset(w, 0, sizeof(*w));
741 w_data[i].w_index = i; /* Witness index never changes. */
742 witness_free(w);
743 }
744 KASSERT(STAILQ_FIRST(&w_free)->w_index == 0,
745 ("%s: Invalid list of free witness objects", __func__));
746
747	/* Witness with index 0 is deliberately left unused, to aid in debugging. */
748 STAILQ_REMOVE_HEAD(&w_free, w_list);
749 w_free_cnt--;
750
751 memset(w_rmatrix, 0,
752 (sizeof(**w_rmatrix) * (WITNESS_COUNT+1) * (WITNESS_COUNT+1)));
753
754 for (i = 0; i < LOCK_CHILDCOUNT; i++)
755 witness_lock_list_free(&w_locklistdata[i]);
756 witness_init_hash_tables();
757
758 /* First add in all the specified order lists. */
759 for (order = order_lists; order->w_name != NULL; order++) {
760 w = enroll(order->w_name, order->w_class);
761 if (w == NULL)
762 continue;
763 w->w_file = "order list";
764 for (order++; order->w_name != NULL; order++) {
765 w1 = enroll(order->w_name, order->w_class);
766 if (w1 == NULL)
767 continue;
768 w1->w_file = "order list";
769 itismychild(w, w1);
770 w = w1;
771 }
772 }
773 witness_spin_warn = 1;
774
775 /* Iterate through all locks and add them to witness. */
776 for (i = 0; pending_locks[i].wh_lock != NULL; i++) {
777 lock = pending_locks[i].wh_lock;
778 KASSERT(lock->lo_flags & LO_WITNESS,
779 ("%s: lock %s is on pending list but not LO_WITNESS",
780 __func__, lock->lo_name));
781 lock->lo_witness = enroll(pending_locks[i].wh_type,
782 LOCK_CLASS(lock));
783 }
784
785 /* Mark the witness code as being ready for use. */
786 witness_cold = 0;
787
788 mtx_lock(&Giant);
789}
790SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize,
791 NULL);
792
793void
794witness_init(struct lock_object *lock, const char *type)
795{
796 struct lock_class *class;
797
798 /* Various sanity checks. */
799 class = LOCK_CLASS(lock);
800 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
801 (class->lc_flags & LC_RECURSABLE) == 0)
802 panic("%s: lock (%s) %s can not be recursable", __func__,
803 class->lc_name, lock->lo_name);
804 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
805 (class->lc_flags & LC_SLEEPABLE) == 0)
806 panic("%s: lock (%s) %s can not be sleepable", __func__,
807 class->lc_name, lock->lo_name);
808 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
809 (class->lc_flags & LC_UPGRADABLE) == 0)
810 panic("%s: lock (%s) %s can not be upgradable", __func__,
811 class->lc_name, lock->lo_name);
812
813 /*
814 * If we shouldn't watch this lock, then just clear lo_witness.
815 * Otherwise, if witness_cold is set, then it is too early to
816 * enroll this lock, so defer it to witness_initialize() by adding
817 * it to the pending_locks list. If it is not too early, then enroll
818 * the lock now.
819 */
820 if (witness_watch < 1 || panicstr != NULL ||
821 (lock->lo_flags & LO_WITNESS) == 0)
822 lock->lo_witness = NULL;
823 else if (witness_cold) {
824 if (pending_cnt + 1 > WITNESS_PENDLIST)
825 panic("%s: pending locks list is too small, bump it\n",
826 __func__);
827 pending_locks[pending_cnt].wh_lock = lock;
828 pending_locks[pending_cnt++].wh_type = type;
829 } else
830 lock->lo_witness = enroll(type, class);
831}
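/*
 * witness_init() is rarely called directly; a sketch of the usual path,
 * assuming a default mutex:
 *
 *	mtx_init(&m, "mylock", NULL, MTX_DEF);
 *	  -> lock_init(&m.lock_object, &lock_class_mtx_sleep, ...)
 *	    -> witness_init(&m.lock_object, "mylock");
 */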
832
833void
834witness_destroy(struct lock_object *lock)
835{
836 struct lock_class *class;
837 struct witness *w;
838
839 class = LOCK_CLASS(lock);
840
841 if (witness_cold)
842 panic("lock (%s) %s destroyed while witness_cold",
843 class->lc_name, lock->lo_name);
844
845 /* XXX: need to verify that no one holds the lock */
846 if ((lock->lo_flags & LO_WITNESS) == 0 || lock->lo_witness == NULL)
847 return;
848 w = lock->lo_witness;
849
850 mtx_lock_spin(&w_mtx);
851 MPASS(w->w_refcount > 0);
852 w->w_refcount--;
853
854 if (w->w_refcount == 0)
855 depart(w);
856 mtx_unlock_spin(&w_mtx);
857}
858
859#ifdef DDB
860static void
861witness_ddb_compute_levels(void)
862{
863 struct witness *w;
864
865 /*
866 * First clear all levels.
867 */
868 STAILQ_FOREACH(w, &w_all, w_list)
869 w->w_ddb_level = -1;
870
871 /*
872 * Look for locks with no parents and level all their descendants.
873 */
874 STAILQ_FOREACH(w, &w_all, w_list) {
875
876 /* If the witness has ancestors (is not a root), skip it. */
877 if (w->w_num_ancestors > 0)
878 continue;
879 witness_ddb_level_descendants(w, 0);
880 }
881}
882
883static void
884witness_ddb_level_descendants(struct witness *w, int l)
885{
886 int i;
887
888 if (w->w_ddb_level >= l)
889 return;
890
891 w->w_ddb_level = l;
892 l++;
893
894 for (i = 1; i <= w_max_used_index; i++) {
895 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
896 witness_ddb_level_descendants(&w_data[i], l);
897 }
898}
899
900static void
901witness_ddb_display_descendants(void(*prnt)(const char *fmt, ...),
902 struct witness *w, int indent)
903{
904 int i;
905
906 for (i = 0; i < indent; i++)
907 prnt(" ");
908 prnt("%s (type: %s, depth: %d, active refs: %d)",
909 w->w_name, w->w_class->lc_name,
910 w->w_ddb_level, w->w_refcount);
911 if (w->w_displayed) {
912 prnt(" -- (already displayed)\n");
913 return;
914 }
915 w->w_displayed = 1;
916 if (w->w_file != NULL && w->w_line != 0)
917 prnt(" -- last acquired @ %s:%d\n", w->w_file,
918 w->w_line);
919 else
920 prnt(" -- never acquired\n");
921 indent++;
922 WITNESS_INDEX_ASSERT(w->w_index);
923 for (i = 1; i <= w_max_used_index; i++) {
924 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT)
925 witness_ddb_display_descendants(prnt, &w_data[i],
926 indent);
927 }
928}
929
930static void
931witness_ddb_display_list(void(*prnt)(const char *fmt, ...),
932 struct witness_list *list)
933{
934 struct witness *w;
935
936 STAILQ_FOREACH(w, list, w_typelist) {
937 if (w->w_file == NULL || w->w_ddb_level > 0)
938 continue;
939
940 /* This lock has no ancestors - display its descendants. */
941 witness_ddb_display_descendants(prnt, w, 0);
942 }
943}
944
945static void
946witness_ddb_display(void(*prnt)(const char *fmt, ...))
947{
948 struct witness *w;
949
950 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
951 witness_ddb_compute_levels();
952
953 /* Clear all the displayed flags. */
954 STAILQ_FOREACH(w, &w_all, w_list)
955 w->w_displayed = 0;
956
957 /*
958 * First, handle sleep locks which have been acquired at least
959 * once.
960 */
961 prnt("Sleep locks:\n");
962 witness_ddb_display_list(prnt, &w_sleep);
963
964 /*
965 * Now do spin locks which have been acquired at least once.
966 */
967 prnt("\nSpin locks:\n");
968 witness_ddb_display_list(prnt, &w_spin);
969
970 /*
971 * Finally, any locks which have not been acquired yet.
972 */
973 prnt("\nLocks which were never acquired:\n");
974 STAILQ_FOREACH(w, &w_all, w_list) {
975 if (w->w_file != NULL || w->w_refcount == 0)
976 continue;
977 prnt("%s (type: %s, depth: %d)\n", w->w_name,
978 w->w_class->lc_name, w->w_ddb_level);
979 }
980}
981#endif /* DDB */
982
983/* Trim useless garbage from filenames. */
984static const char *
985fixup_filename(const char *file)
986{
987
988 if (file == NULL)
989 return (NULL);
990 while (strncmp(file, "../", 3) == 0)
991 file += 3;
992 return (file);
993}
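/*
 * For example, a path recorded by __FILE__ such as
 * "../../../kern/kern_mutex.c" is reduced to "kern/kern_mutex.c" by
 * stripping each leading "../" component.  (Illustrative path only.)
 */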
994
995int
996witness_defineorder(struct lock_object *lock1, struct lock_object *lock2)
997{
998
999 if (witness_watch == -1 || panicstr != NULL)
1000 return (0);
1001
1002 /* Require locks that witness knows about. */
1003 if (lock1 == NULL || lock1->lo_witness == NULL || lock2 == NULL ||
1004 lock2->lo_witness == NULL)
1005 return (EINVAL);
1006
1007 mtx_assert(&w_mtx, MA_NOTOWNED);
1008 mtx_lock_spin(&w_mtx);
1009
1010 /*
1011 * If we already have either an explicit or implied lock order that
1012 * is the other way around, then return an error.
1013 */
1014 if (witness_watch &&
1015 isitmydescendant(lock2->lo_witness, lock1->lo_witness)) {
1016 mtx_unlock_spin(&w_mtx);
1017 return (EDOOFUS);
1018 }
1019
1020 /* Try to add the new order. */
1021 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1022 lock2->lo_witness->w_name, lock1->lo_witness->w_name);
1023 itismychild(lock1->lo_witness, lock2->lo_witness);
1024 mtx_unlock_spin(&w_mtx);
1025 return (0);
1026}
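/*
 * Usage sketch (hypothetical locks; assumes both were created with
 * LO_WITNESS so that lo_witness is valid):
 *
 *	struct mtx a, b;
 *	int error;
 *
 *	mtx_init(&a, "lock_a", NULL, MTX_DEF);
 *	mtx_init(&b, "lock_b", NULL, MTX_DEF);
 *	error = witness_defineorder(&a.lock_object, &b.lock_object);
 *
 * A return of EDOOFUS means the reverse order was already known, either
 * explicitly or implied; EINVAL means a lock is unknown to witness.
 */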
1027
1028void
1029witness_checkorder(struct lock_object *lock, int flags, const char *file,
1030 int line, struct lock_object *interlock)
1031{
1032 struct lock_list_entry *lock_list, *lle;
1033 struct lock_instance *lock1, *lock2, *plock;
1034 struct lock_class *class;
1035 struct witness *w, *w1;
1036 struct thread *td;
1037 int i, j;
1038
1039 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL ||
1040 panicstr != NULL)
1041 return;
1042
1043 w = lock->lo_witness;
1044 class = LOCK_CLASS(lock);
1045 td = curthread;
1046 file = fixup_filename(file);
1047
1048 if (class->lc_flags & LC_SLEEPLOCK) {
1049
1050 /*
1051 * Since spin locks include a critical section, this check
1052 * implicitly enforces a lock order of all sleep locks before
1053 * all spin locks.
1054 */
1055 if (td->td_critnest != 0 && !kdb_active)
1056 panic("blockable sleep lock (%s) %s @ %s:%d",
1057 class->lc_name, lock->lo_name, file, line);
1058
1059 /*
1060 * If this is the first lock acquired then just return as
1061 * no order checking is needed.
1062 */
1063 lock_list = td->td_sleeplocks;
1064 if (lock_list == NULL || lock_list->ll_count == 0)
1065 return;
1066 } else {
1067
1068 /*
1069 * If this is the first lock, just return, as no order
1070 * checking is needed.  Pin the thread so that migration
1071 * cannot occur while we check whether any spin locks are
1072 * held.  Once we know that at least one spin lock is held,
1073 * the thread is on a safe path and it is allowed to be
1074 * unpinned.
1075 */
1076 sched_pin();
1077 lock_list = PCPU_GET(spinlocks);
1078 if (lock_list == NULL || lock_list->ll_count == 0) {
1079 sched_unpin();
1080 return;
1081 }
1082 sched_unpin();
1083 }
1084
1085 /*
1086 * Check to see if we are recursing on a lock we already own. If
1087 * so, make sure that we don't mismatch exclusive and shared lock
1088 * acquires.
1089 */
1090 lock1 = find_instance(lock_list, lock);
1091 if (lock1 != NULL) {
1092 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
1093 (flags & LOP_EXCLUSIVE) == 0) {
1094 printf("shared lock of (%s) %s @ %s:%d\n",
1095 class->lc_name, lock->lo_name, file, line);
1096 printf("while exclusively locked from %s:%d\n",
1097 lock1->li_file, lock1->li_line);
1098 panic("share->excl");
1099 }
1100 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
1101 (flags & LOP_EXCLUSIVE) != 0) {
1102 printf("exclusive lock of (%s) %s @ %s:%d\n",
1103 class->lc_name, lock->lo_name, file, line);
1104 printf("while share locked from %s:%d\n",
1105 lock1->li_file, lock1->li_line);
1106 panic("excl->share");
1107 }
1108 return;
1109 }
1110
1111 /*
1112 * Find the previously acquired lock, but ignore interlocks.
1113 */
1114 plock = &lock_list->ll_children[lock_list->ll_count - 1];
1115 if (interlock != NULL && plock->li_lock == interlock) {
1116 if (lock_list->ll_count > 1)
1117 plock =
1118 &lock_list->ll_children[lock_list->ll_count - 2];
1119 else {
1120 lle = lock_list->ll_next;
1121
1122 /*
1123 * The interlock is the only lock we hold, so
1124 * simply return.
1125 */
1126 if (lle == NULL)
1127 return;
1128 plock = &lle->ll_children[lle->ll_count - 1];
1129 }
1130 }
1131
1132 /*
1133 * Try to perform most checks without a lock. If this succeeds we
1134 * can skip acquiring the lock and return success.
1135 */
1136 w1 = plock->li_lock->lo_witness;
1137 if (witness_lock_order_check(w1, w))
1138 return;
1139
1140 /*
1141 * Check for duplicate locks of the same type. Note that we only
1142 * have to check for this on the last lock we just acquired. Any
1143 * other cases will be caught as lock order violations.
1144 */
1145 mtx_lock_spin(&w_mtx);
1146 witness_lock_order_add(w1, w);
1147 if (w1 == w) {
1148 i = w->w_index;
1149 if (!(lock->lo_flags & LO_DUPOK) && !(flags & LOP_DUPOK) &&
1150 !(w_rmatrix[i][i] & WITNESS_REVERSAL)) {
1151 w_rmatrix[i][i] |= WITNESS_REVERSAL;
1152 w->w_reversed = 1;
1153 mtx_unlock_spin(&w_mtx);
1154 printf(
1155 "acquiring duplicate lock of same type: \"%s\"\n",
1156 w->w_name);
1157 printf(" 1st %s @ %s:%d\n", plock->li_lock->lo_name,
1158 plock->li_file, plock->li_line);
1159 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
1160 witness_debugger(1);
1161 } else
1162 mtx_unlock_spin(&w_mtx);
1163 return;
1164 }
1165 mtx_assert(&w_mtx, MA_OWNED);
1166
1167 /*
1168 * If we know that the lock we are acquiring comes after
1169 * the lock we most recently acquired in the lock order tree,
1170 * then there is no need for any further checks.
1171 */
1172 if (isitmychild(w1, w))
1173 goto out;
1174
1175 for (j = 0, lle = lock_list; lle != NULL; lle = lle->ll_next) {
1176 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
1177
1178 MPASS(j < WITNESS_COUNT);
1179 lock1 = &lle->ll_children[i];
1180
1181 /*
1182 * Ignore the interlock the first time we see it.
1183 */
1184 if (interlock != NULL && interlock == lock1->li_lock) {
1185 interlock = NULL;
1186 continue;
1187 }
1188
1189 /*
1190 * If this lock doesn't undergo witness checking,
1191 * then skip it.
1192 */
1193 w1 = lock1->li_lock->lo_witness;
1194 if (w1 == NULL) {
1195 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
1196 ("lock missing witness structure"));
1197 continue;
1198 }
1199
1200 /*
1201 * If we are locking Giant and this is a sleepable
1202 * lock, then skip it.
1203 */
1204 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
1205 lock == &Giant.lock_object)
1206 continue;
1207
1208 /*
1209 * If we are locking a sleepable lock and this lock
1210 * is Giant, then skip it.
1211 */
1212 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1213 lock1->li_lock == &Giant.lock_object)
1214 continue;
1215
1216 /*
1217 * If we are locking a sleepable lock and this lock
1218 * isn't sleepable, we want to treat it as a lock
1219 * order violation to enforce a general lock order of
1220 * sleepable locks before non-sleepable locks.
1221 */
1222 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1223 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1224 goto reversal;
1225
1226 /*
1227 * If we are locking Giant and this is a non-sleepable
1228 * lock, then treat it as a reversal.
1229 */
1230 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0 &&
1231 lock == &Giant.lock_object)
1232 goto reversal;
1233
1234 /*
1235 * Check the lock order hierarchy for a reversal.
1236 */
1237 if (!isitmydescendant(w, w1))
1238 continue;
1239 reversal:
1240
1241 /*
1242 * We have a lock order violation, check to see if it
1243 * is allowed or has already been yelled about.
1244 */
1245#ifdef BLESSING
1246
1247 /*
1248 * If the lock order is blessed, just bail. We don't
1249 * look for other lock order violations though, which
1250 * may be a bug.
1251 */
1252 if (blessed(w, w1))
1253 goto out;
1254#endif
1255
1256 /* Bail if this violation is known */
1257 if (w_rmatrix[w1->w_index][w->w_index] & WITNESS_REVERSAL)
1258 goto out;
1259
1260 /* Record this as a violation */
1261 w_rmatrix[w1->w_index][w->w_index] |= WITNESS_REVERSAL;
1262 w_rmatrix[w->w_index][w1->w_index] |= WITNESS_REVERSAL;
1263 w->w_reversed = w1->w_reversed = 1;
1264 witness_increment_graph_generation();
1265 mtx_unlock_spin(&w_mtx);
1266
1267 /*
1268 * Ok, yell about it.
1269 */
1270 if (((lock->lo_flags & LO_SLEEPABLE) != 0 &&
1271 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
1272 printf(
1273 "lock order reversal: (sleepable after non-sleepable)\n");
1274 else if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0
1275 && lock == &Giant.lock_object)
1276 printf(
1277 "lock order reversal: (Giant after non-sleepable)\n");
1278 else
1279 printf("lock order reversal:\n");
1280
1281 /*
1282 * Try to locate an earlier lock with
1283 * witness w in our list.
1284 */
1285 do {
1286 lock2 = &lle->ll_children[i];
1287 MPASS(lock2->li_lock != NULL);
1288 if (lock2->li_lock->lo_witness == w)
1289 break;
1290 if (i == 0 && lle->ll_next != NULL) {
1291 lle = lle->ll_next;
1292 i = lle->ll_count - 1;
1293 MPASS(i >= 0 && i < LOCK_NCHILDREN);
1294 } else
1295 i--;
1296 } while (i >= 0);
1297 if (i < 0) {
1298 printf(" 1st %p %s (%s) @ %s:%d\n",
1299 lock1->li_lock, lock1->li_lock->lo_name,
1300 w1->w_name, lock1->li_file, lock1->li_line);
1301 printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
1302 lock->lo_name, w->w_name, file, line);
1303 } else {
1304 printf(" 1st %p %s (%s) @ %s:%d\n",
1305 lock2->li_lock, lock2->li_lock->lo_name,
1306 lock2->li_lock->lo_witness->w_name,
1307 lock2->li_file, lock2->li_line);
1308 printf(" 2nd %p %s (%s) @ %s:%d\n",
1309 lock1->li_lock, lock1->li_lock->lo_name,
1310 w1->w_name, lock1->li_file, lock1->li_line);
1311 printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
1312 lock->lo_name, w->w_name, file, line);
1313 }
1314 witness_debugger(1);
1315 return;
1316 }
1317 }
1318
1319 /*
1320 * If requested, build a new lock order. However, don't build a new
1321 * relationship between a sleepable lock and Giant if it is in the
1322 * wrong direction. The correct lock order is that sleepable locks
1323 * always come before Giant.
1324 */
1325 if (flags & LOP_NEWORDER &&
1326 !(plock->li_lock == &Giant.lock_object &&
1327 (lock->lo_flags & LO_SLEEPABLE) != 0)) {
1328 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
1329 w->w_name, plock->li_lock->lo_witness->w_name);
1330 itismychild(plock->li_lock->lo_witness, w);
1331 }
1332out:
1333 mtx_unlock_spin(&w_mtx);
1334}
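/*
 * To summarize, witness_checkorder() applies three increasingly expensive
 * tests: a lockless w_rmatrix/hash lookup (witness_lock_order_check), a
 * direct-child test under w_mtx (isitmychild), and finally a walk over
 * every held lock looking for a descendant relationship in the opposite
 * direction.  Only that last stage reports reversals.
 */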
1335
1336void
1337witness_lock(struct lock_object *lock, int flags, const char *file, int line)
1338{
1339 struct lock_list_entry **lock_list, *lle;
1340 struct lock_instance *instance;
1341 struct witness *w;
1342 struct thread *td;
1343
1344 if (witness_cold || witness_watch == -1 || lock->lo_witness == NULL ||
1345 panicstr != NULL)
1346 return;
1347 w = lock->lo_witness;
1348 td = curthread;
1349 file = fixup_filename(file);
1350
1351 /* Determine lock list for this lock. */
1352 if (LOCK_CLASS(lock)->lc_flags & LC_SLEEPLOCK)
1353 lock_list = &td->td_sleeplocks;
1354 else
1355 lock_list = PCPU_PTR(spinlocks);
1356
1357 /* Check to see if we are recursing on a lock we already own. */
1358 instance = find_instance(*lock_list, lock);
1359 if (instance != NULL) {
1360 instance->li_flags++;
1361 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
1362 td->td_proc->p_pid, lock->lo_name,
1363 instance->li_flags & LI_RECURSEMASK);
1364 instance->li_file = file;
1365 instance->li_line = line;
1366 return;
1367 }
1368
1369 /* Update per-witness last file and line acquire. */
1370 w->w_file = file;
1371 w->w_line = line;
1372
1373 /* Find the next open lock instance in the list and fill it. */
1374 lle = *lock_list;
1375 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
1376 lle = witness_lock_list_get();
1377 if (lle == NULL)
1378 return;
1379 lle->ll_next = *lock_list;
1380 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
1381 td->td_proc->p_pid, lle);
1382 *lock_list = lle;
1383 }
1384 instance = &lle->ll_children[lle->ll_count++];
1385 instance->li_lock = lock;
1386 instance->li_line = line;
1387 instance->li_file = file;
1388 if ((flags & LOP_EXCLUSIVE) != 0)
1389 instance->li_flags = LI_EXCLUSIVE;
1390 else
1391 instance->li_flags = 0;
1392 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
1393 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
1394}
1395
1396void
1397witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
1398{
1399 struct lock_instance *instance;
1400 struct lock_class *class;
1401
1402 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1403 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1404 return;
1405 class = LOCK_CLASS(lock);
1406 file = fixup_filename(file);
1407 if (witness_watch) {
1408 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1409 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
1410 class->lc_name, lock->lo_name, file, line);
1411 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1412 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
1413 class->lc_name, lock->lo_name, file, line);
1414 }
1415 instance = find_instance(curthread->td_sleeplocks, lock);
1416 if (instance == NULL)
1417 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
1418 class->lc_name, lock->lo_name, file, line);
1419 if (witness_watch) {
1420 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
1421 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
1422 class->lc_name, lock->lo_name, file, line);
1423 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1424 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
1425 class->lc_name, lock->lo_name,
1426 instance->li_flags & LI_RECURSEMASK, file, line);
1427 }
1428 instance->li_flags |= LI_EXCLUSIVE;
1429}
1430
1431void
1432witness_downgrade(struct lock_object *lock, int flags, const char *file,
1433 int line)
1434{
1435 struct lock_instance *instance;
1436 struct lock_class *class;
1437
1438 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
1439 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
1440 return;
1441 class = LOCK_CLASS(lock);
1442 file = fixup_filename(file);
1443 if (witness_watch) {
1444 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
1445 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
1446 class->lc_name, lock->lo_name, file, line);
1447 if ((class->lc_flags & LC_SLEEPLOCK) == 0)
1448 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
1449 class->lc_name, lock->lo_name, file, line);
1450 }
1451 instance = find_instance(curthread->td_sleeplocks, lock);
1452 if (instance == NULL)
1453 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
1454 class->lc_name, lock->lo_name, file, line);
1455 if (witness_watch) {
1456 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
1457 panic("downgrade of shared lock (%s) %s @ %s:%d",
1458 class->lc_name, lock->lo_name, file, line);
1459 if ((instance->li_flags & LI_RECURSEMASK) != 0)
1460 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
1461 class->lc_name, lock->lo_name,
1462 instance->li_flags & LI_RECURSEMASK, file, line);
1463 }
1464 instance->li_flags &= ~LI_EXCLUSIVE;
1465}
1466
1467void
1468witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
1469{
1470 struct lock_list_entry **lock_list, *lle;
1471 struct lock_instance *instance;
1472 struct lock_class *class;
1473 struct thread *td;
1474 register_t s;
1475 int i, j;
1476
1477 if (witness_cold || lock->lo_witness == NULL || panicstr != NULL)
1478 return;
1479 td = curthread;
1480 class = LOCK_CLASS(lock);
1481 file = fixup_filename(file);
1482
1483 /* Find lock instance associated with this lock. */
1484 if (class->lc_flags & LC_SLEEPLOCK)
1485 lock_list = &td->td_sleeplocks;
1486 else
1487 lock_list = PCPU_PTR(spinlocks);
1488 lle = *lock_list;
1489 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
1490 for (i = 0; i < (*lock_list)->ll_count; i++) {
1491 instance = &(*lock_list)->ll_children[i];
1492 if (instance->li_lock == lock)
1493 goto found;
1494 }
1495
1496 /*
1497 * When WITNESS is disabled via witness_watch, locks registered
1498 * while it was enabled may remain in the td_sleeplocks queue.
1499 * Those queues must still be flushed, so tolerate an unlock of a
1500 * lock that was never registered instead of panicking.
1501 */
1502 if (witness_watch > 0)
1503 panic("lock (%s) %s not locked @ %s:%d", class->lc_name,
1504 lock->lo_name, file, line);
1505 else
1506 return;
1507found:
1508
1509 /* First, check for shared/exclusive mismatches. */
1510 if ((instance->li_flags & LI_EXCLUSIVE) != 0 && witness_watch > 0 &&
1511 (flags & LOP_EXCLUSIVE) == 0) {
1512 printf("shared unlock of (%s) %s @ %s:%d\n", class->lc_name,
1513 lock->lo_name, file, line);
1514 printf("while exclusively locked from %s:%d\n",
1515 instance->li_file, instance->li_line);
1516 panic("excl->ushare");
1517 }
1518 if ((instance->li_flags & LI_EXCLUSIVE) == 0 && witness_watch > 0 &&
1519 (flags & LOP_EXCLUSIVE) != 0) {
1520 printf("exclusive unlock of (%s) %s @ %s:%d\n", class->lc_name,
1521 lock->lo_name, file, line);
1522 printf("while share locked from %s:%d\n", instance->li_file,
1523 instance->li_line);
1524 panic("share->uexcl");
1525 }
1526 /* If we are recursed, unrecurse. */
1527 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
1528 CTR4(KTR_WITNESS, "%s: pid %d unrecursed on %s r=%d", __func__,
1529 td->td_proc->p_pid, instance->li_lock->lo_name,
1530 instance->li_flags);
1531 instance->li_flags--;
1532 return;
1533 }
1534 /* The lock is now being dropped, check for NORELEASE flag */
1535 if ((instance->li_flags & LI_NORELEASE) != 0 && witness_watch > 0) {
1536 printf("forbidden unlock of (%s) %s @ %s:%d\n", class->lc_name,
1537 lock->lo_name, file, line);
1538 panic("lock marked norelease");
1539 }
1540
1541 /* Otherwise, remove this item from the list. */
1542 s = intr_disable();
1543 CTR4(KTR_WITNESS, "%s: pid %d removed %s from lle[%d]", __func__,
1544 td->td_proc->p_pid, instance->li_lock->lo_name,
1545 (*lock_list)->ll_count - 1);
1546 for (j = i; j < (*lock_list)->ll_count - 1; j++)
1547 (*lock_list)->ll_children[j] =
1548 (*lock_list)->ll_children[j + 1];
1549 (*lock_list)->ll_count--;
1550 intr_restore(s);
1551
1552 /*
1553 * To reduce contention on w_mtx, we try to keep a head object in
1554 * each list at all times, so that frequent allocation from the
1555 * free witness pool (and the locking that entails) is avoided.
1556 * To keep the code simple, an empty head entry also means that
1557 * there are no further objects in the list, so ownership of the
1558 * list is handed over to the next entry whenever the current
1559 * head must be freed.
1560 */
1561 if ((*lock_list)->ll_count == 0) {
1562 if (*lock_list == lle) {
1563 if (lle->ll_next == NULL)
1564 return;
1565 } else
1566 lle = *lock_list;
1567 *lock_list = lle->ll_next;
1568 CTR3(KTR_WITNESS, "%s: pid %d removed lle %p", __func__,
1569 td->td_proc->p_pid, lle);
1570 witness_lock_list_free(lle);
1571 }
1572}
1573
1574void
1575witness_thread_exit(struct thread *td)
1576{
1577 struct lock_list_entry *lle;
1578 int i, n;
1579
1580 lle = td->td_sleeplocks;
1581 if (lle == NULL || panicstr != NULL)
1582 return;
1583 if (lle->ll_count != 0) {
1584 for (n = 0; lle != NULL; lle = lle->ll_next)
1585 for (i = lle->ll_count - 1; i >= 0; i--) {
1586 if (n == 0)
1587 printf("Thread %p exiting with the following locks held:\n",
1588 td);
1589 n++;
1590 witness_list_lock(&lle->ll_children[i]);
1591
1592 }
1593 panic("Thread %p cannot exit while holding sleeplocks\n", td);
1594 }
1595 witness_lock_list_free(lle);
1596}
1597
1598/*
1599 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1600 * exempt Giant and sleepable locks from the checks as well. If any
1601 * non-exempt locks are held, then a supplied message is printed to the
1602 * console along with a list of the offending locks. If indicated in the
1603 * flags then a failure results in a panic as well.
1604 */
1605int
1606witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1607{
1608 struct lock_list_entry *lock_list, *lle;
1609 struct lock_instance *lock1;
1610 struct thread *td;
1611 va_list ap;
1612 int i, n;
1613
1614 if (witness_cold || witness_watch < 1 || panicstr != NULL)
1615 return (0);
1616 n = 0;
1617 td = curthread;
1618 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1619 for (i = lle->ll_count - 1; i >= 0; i--) {
1620 lock1 = &lle->ll_children[i];
1621 if (lock1->li_lock == lock)
1622 continue;
1623 if (flags & WARN_GIANTOK &&
1624 lock1->li_lock == &Giant.lock_object)
1625 continue;
1626 if (flags & WARN_SLEEPOK &&
1627 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1628 continue;
1629 if (n == 0) {
1630 va_start(ap, fmt);
1631 vprintf(fmt, ap);
1632 va_end(ap);
1633 printf(" with the following");
1634 if (flags & WARN_SLEEPOK)
1635 printf(" non-sleepable");
1636 printf(" locks held:\n");
1637 }
1638 n++;
1639 witness_list_lock(lock1);
1640 }
1641
1642 /*
1643 * Pin the thread in order to avoid problems with thread migration.
1644 * Once all checks of spin lock ownership have passed, the
1645 * thread is on a safe path and it can be unpinned.
1646 */
1647 sched_pin();
1648 lock_list = PCPU_GET(spinlocks);
1649 if (lock_list != NULL && lock_list->ll_count != 0) {
1650 sched_unpin();
1651
1652 /*
1653 * We should only be holding one spin lock, and since the
1654 * exemption flags never apply to this lock class, check
1655 * whether that single spin lock is the one curthread is
1656 * expected to hold.
1657 */
1658 lock1 = &lock_list->ll_children[lock_list->ll_count - 1];
1659 if (lock_list->ll_count == 1 && lock_list->ll_next == NULL &&
1660 lock1->li_lock == lock && n == 0)
1661 return (0);
1662
1663 va_start(ap, fmt);
1664 vprintf(fmt, ap);
1665 va_end(ap);
1666 printf(" with the following");
1667 if (flags & WARN_SLEEPOK)
1668 printf(" non-sleepable");
1669 printf(" locks held:\n");
1670 n += witness_list_locks(&lock_list);
1671 } else
1672 sched_unpin();
1673 if (flags & WARN_PANIC && n)
1674 panic("%s", __func__);
1675 else
1676 witness_debugger(n);
1677 return (n);
1678}
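/*
 * A typical call, via the WITNESS_WARN() wrapper macro, asserts that only
 * Giant and sleepable locks are held on the way to sleep (sketch; the
 * message string is hypothetical):
 *
 *	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 *	    "sleeping without a lock");
 */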
1679
1680const char *
1681witness_file(struct lock_object *lock)
1682{
1683 struct witness *w;
1684
1685 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1686 return ("?");
1687 w = lock->lo_witness;
1688 return (w->w_file);
1689}
1690
1691int
1692witness_line(struct lock_object *lock)
1693{
1694 struct witness *w;
1695
1696 if (witness_cold || witness_watch < 1 || lock->lo_witness == NULL)
1697 return (0);
1698 w = lock->lo_witness;
1699 return (w->w_line);
1700}
1701
1702static struct witness *
1703enroll(const char *description, struct lock_class *lock_class)
1704{
1705 struct witness *w;
1706 struct witness_list *typelist;
1707
1708 MPASS(description != NULL);
1709
1710 if (witness_watch == -1 || panicstr != NULL)
1711 return (NULL);
1712 if ((lock_class->lc_flags & LC_SPINLOCK)) {
1713 if (witness_skipspin)
1714 return (NULL);
1715 else
1716 typelist = &w_spin;
1717 } else if ((lock_class->lc_flags & LC_SLEEPLOCK))
1718 typelist = &w_sleep;
1719 else
1720 panic("lock class %s is not sleep or spin",
1721 lock_class->lc_name);
1722
1723 mtx_lock_spin(&w_mtx);
1724 w = witness_hash_get(description);
1725 if (w)
1726 goto found;
1727 if ((w = witness_get()) == NULL)
1728 return (NULL);
1729 MPASS(strlen(description) < MAX_W_NAME);
1730 strcpy(w->w_name, description);
1731 w->w_class = lock_class;
1732 w->w_refcount = 1;
1733 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1734 if (lock_class->lc_flags & LC_SPINLOCK) {
1735 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1736 w_spin_cnt++;
1737 } else if (lock_class->lc_flags & LC_SLEEPLOCK) {
1738 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1739 w_sleep_cnt++;
1740 }
1741
1742 /* Insert new witness into the hash */
1743 witness_hash_put(w);
1744 witness_increment_graph_generation();
1745 mtx_unlock_spin(&w_mtx);
1746 return (w);
1747found:
1748 w->w_refcount++;
1749 mtx_unlock_spin(&w_mtx);
1750 if (lock_class != w->w_class)
1751 panic(
1752 "lock (%s) %s does not match earlier (%s) lock",
1753 description, lock_class->lc_name,
1754 w->w_class->lc_name);
1755 return (w);
1756}
1757
1758static void
1759depart(struct witness *w)
1760{
1761 struct witness_list *list;
1762
1763 MPASS(w->w_refcount == 0);
1764 if (w->w_class->lc_flags & LC_SLEEPLOCK) {
1765 list = &w_sleep;
1766 w_sleep_cnt--;
1767 } else {
1768 list = &w_spin;
1769 w_spin_cnt--;
1770 }
1771 /*
1772 * Set file to NULL as it may point into a loadable module.
1773 */
1774 w->w_file = NULL;
1775 w->w_line = 0;
1776 witness_increment_graph_generation();
1777}
1778
1779
1780static void
1781adopt(struct witness *parent, struct witness *child)
1782{
1783 int pi, ci, i, j;
1784
1785 if (witness_cold == 0)
1786 mtx_assert(&w_mtx, MA_OWNED);
1787
1788 /* If the relationship is already known, there's no work to be done. */
1789 if (isitmychild(parent, child))
1790 return;
1791
1792 /* When the structure of the graph changes, bump up the generation. */
1793 witness_increment_graph_generation();
1794
1795 /*
1796 * The hard part ... create the direct relationship, then propagate all
1797 * indirect relationships.
1798 */
1799 pi = parent->w_index;
1800 ci = child->w_index;
1801 WITNESS_INDEX_ASSERT(pi);
1802 WITNESS_INDEX_ASSERT(ci);
1803 MPASS(pi != ci);
1804 w_rmatrix[pi][ci] |= WITNESS_PARENT;
1805 w_rmatrix[ci][pi] |= WITNESS_CHILD;
1806
1807 /*
1808 * If parent was not already an ancestor of child,
1809 * then we increment the descendant and ancestor counters.
1810 */
1811 if ((w_rmatrix[pi][ci] & WITNESS_ANCESTOR) == 0) {
1812 parent->w_num_descendants++;
1813 child->w_num_ancestors++;
1814 }
1815
1816 /*
1817 * Find each ancestor of 'pi'. Note that 'pi' itself is counted as
1818 * an ancestor of 'pi' during this loop.
1819 */
1820 for (i = 1; i <= w_max_used_index; i++) {
1821 if ((w_rmatrix[i][pi] & WITNESS_ANCESTOR_MASK) == 0 &&
1822 (i != pi))
1823 continue;
1824
1825 /* Find each descendant of 'i' and mark it as a descendant. */
1826 for (j = 1; j <= w_max_used_index; j++) {
1827
1828 /*
1829 * Skip children that are already marked as
1830 * descendants of 'i'.
1831 */
1832 if (w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK)
1833 continue;
1834
1835 /*
1836 * We are only interested in descendants of 'ci'. Note
1837 * that 'ci' itself is counted as a descendant of 'ci'.
1838 */
1839 if ((w_rmatrix[ci][j] & WITNESS_ANCESTOR_MASK) == 0 &&
1840 (j != ci))
1841 continue;
1842 w_rmatrix[i][j] |= WITNESS_ANCESTOR;
1843 w_rmatrix[j][i] |= WITNESS_DESCENDANT;
1844 w_data[i].w_num_descendants++;
1845 w_data[j].w_num_ancestors++;
1846
1847 /*
1848 * Make sure we aren't marking a node as both an
1849 * ancestor and descendant. We should have caught
1850 * this as a lock order reversal earlier.
1851 */
1852 if ((w_rmatrix[i][j] & WITNESS_ANCESTOR_MASK) &&
1853 (w_rmatrix[i][j] & WITNESS_DESCENDANT_MASK)) {
1854 printf("witness rmatrix paradox! [%d][%d]=%d "
1855 "both ancestor and descendant\n",
1856 i, j, w_rmatrix[i][j]);
1857 kdb_backtrace();
1858 printf("Witness disabled.\n");
1859 witness_watch = -1;
1860 }
1861 if ((w_rmatrix[j][i] & WITNESS_ANCESTOR_MASK) &&
1862 (w_rmatrix[j][i] & WITNESS_DESCENDANT_MASK)) {
1863 printf("witness rmatrix paradox! [%d][%d]=%d "
1864 "both ancestor and descendant\n",
1865 j, i, w_rmatrix[j][i]);
1866 kdb_backtrace();
1867 printf("Witness disabled.\n");
1868 witness_watch = -1;
1869 }
1870 }
1871 }
1872}
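/*
 * A small worked example with hypothetical witnesses A, B and C: if
 * A -> B is already recorded and adopt(B, C) is called, the outer loop
 * visits B and each of its ancestors (here A) while the inner loop
 * visits C and each of its descendants, so the transitive edge A -> C
 * is recorded immediately rather than discovered at check time.
 */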
1873
1874static void
1875itismychild(struct witness *parent, struct witness *child)
1876{
1877
1878 MPASS(child != NULL && parent != NULL);
1879 if (witness_cold == 0)
1880 mtx_assert(&w_mtx, MA_OWNED);
1881
1882 if (!witness_lock_type_equal(parent, child)) {
1883 if (witness_cold == 0)
1884 mtx_unlock_spin(&w_mtx);
1885 panic("%s: parent \"%s\" (%s) and child \"%s\" (%s) are not "
1886 "the same lock type", __func__, parent->w_name,
1887 parent->w_class->lc_name, child->w_name,
1888 child->w_class->lc_name);
1889 }
1890 adopt(parent, child);
1891}
1892
1893/*
1894 * Generic code for the isitmy*() functions. The rmask parameter is the
1895 * expected relationship of w1 to w2.
1896 */
1897static int
1898_isitmyx(struct witness *w1, struct witness *w2, int rmask, const char *fname)
1899{
1900 unsigned char r1, r2;
1901 int i1, i2;
1902
1903 i1 = w1->w_index;
1904 i2 = w2->w_index;
1905 WITNESS_INDEX_ASSERT(i1);
1906 WITNESS_INDEX_ASSERT(i2);
1907 r1 = w_rmatrix[i1][i2] & WITNESS_RELATED_MASK;
1908 r2 = w_rmatrix[i2][i1] & WITNESS_RELATED_MASK;
1909
1910 /* The flags on one better be the inverse of the flags on the other */
1911 if (!((WITNESS_ATOD(r1) == r2 && WITNESS_DTOA(r2) == r1) ||
1912 (WITNESS_DTOA(r1) == r2 && WITNESS_ATOD(r2) == r1))) {
1913 printf("%s: rmatrix mismatch between %s (index %d) and %s "
1914 "(index %d): w_rmatrix[%d][%d] == %hhx but "
1915 "w_rmatrix[%d][%d] == %hhx\n",
1916 fname, w1->w_name, i1, w2->w_name, i2, i1, i2, r1,
1917 i2, i1, r2);
1918 kdb_backtrace();
1919 printf("Witness disabled.\n");
1920 witness_watch = -1;
1921 }
1922 return (r1 & rmask);
1923}
1924
1925/*
1926 * Checks if @child is a direct child of @parent.
1927 */
1928static int
1929isitmychild(struct witness *parent, struct witness *child)
1930{
1931
1932 return (_isitmyx(parent, child, WITNESS_PARENT, __func__));
1933}
1934
1935/*
1936 * Checks if @descendant is a direct or indirect descendant of @ancestor.
1937 */
1938static int
1939isitmydescendant(struct witness *ancestor, struct witness *descendant)
1940{
1941
1942 return (_isitmyx(ancestor, descendant, WITNESS_ANCESTOR_MASK,
1943 __func__));
1944}
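/*
 * The distinction, with hypothetical witnesses A -> B -> C:
 * isitmychild() only matches a direct edge (WITNESS_PARENT), so
 * isitmychild(A, C) is false, while isitmydescendant() matches any
 * path (WITNESS_ANCESTOR_MASK), so isitmydescendant(A, C) is true.
 */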
1945
1946#ifdef BLESSING
1947static int
1948blessed(struct witness *w1, struct witness *w2)
1949{
1950 int i;
1951 struct witness_blessed *b;
1952
1953 for (i = 0; i < blessed_count; i++) {
1954 b = &blessed_list[i];
1955 if (strcmp(w1->w_name, b->b_lock1) == 0) {
1956 if (strcmp(w2->w_name, b->b_lock2) == 0)
1957 return (1);
1958 continue;
1959 }
1960 if (strcmp(w1->w_name, b->b_lock2) == 0)
1961 if (strcmp(w2->w_name, b->b_lock1) == 0)
1962 return (1);
1963 }
1964 return (0);
1965}
1966#endif
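/*
 * A blessed pair is declared in blessed_list; a sketch with hypothetical
 * lock names:
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "some lock", "another lock" },
 *	};
 *
 * blessed() treats the pair as unordered, so acquiring the two named
 * witnesses in either order is exempt from reversal reports.
 */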
1967
1968static struct witness *
1969witness_get(void)
1970{
1971 struct witness *w;
1972 int index;
1973
1974 if (witness_cold == 0)
1975 mtx_assert(&w_mtx, MA_OWNED);
1976
1977 if (witness_watch == -1) {
1978 mtx_unlock_spin(&w_mtx);
1979 return (NULL);
1980 }
1981 if (STAILQ_EMPTY(&w_free)) {
1982 witness_watch = -1;
1983 mtx_unlock_spin(&w_mtx);
1984 printf("WITNESS: unable to allocate a new witness object\n");
1985 return (NULL);
1986 }
1987 w = STAILQ_FIRST(&w_free);
1988 STAILQ_REMOVE_HEAD(&w_free, w_list);
1989 w_free_cnt--;
1990 index = w->w_index;
1991 MPASS(index > 0 && index == w_max_used_index+1 &&
1992 index < WITNESS_COUNT);
1993 bzero(w, sizeof(*w));
1994 w->w_index = index;
1995 if (index > w_max_used_index)
1996 w_max_used_index = index;
1997 return (w);
1998}
1999
2000static void
2001witness_free(struct witness *w)
2002{
2003
2004 STAILQ_INSERT_HEAD(&w_free, w, w_list);
2005 w_free_cnt++;
2006}
2007
2008static struct lock_list_entry *
2009witness_lock_list_get(void)
2010{
2011 struct lock_list_entry *lle;
2012
2013 if (witness_watch == -1)
2014 return (NULL);
2015 mtx_lock_spin(&w_mtx);
2016 lle = w_lock_list_free;
2017 if (lle == NULL) {
2018 witness_watch = -1;
2019 mtx_unlock_spin(&w_mtx);
2020 printf("%s: witness exhausted\n", __func__);
2021 return (NULL);
2022 }
2023 w_lock_list_free = lle->ll_next;
2024 mtx_unlock_spin(&w_mtx);
2025 bzero(lle, sizeof(*lle));
2026 return (lle);
2027}
2028
2029static void
2030witness_lock_list_free(struct lock_list_entry *lle)
2031{
2032
2033 mtx_lock_spin(&w_mtx);
2034 lle->ll_next = w_lock_list_free;
2035 w_lock_list_free = lle;
2036 mtx_unlock_spin(&w_mtx);
2037}
2038
2039static struct lock_instance *
2040find_instance(struct lock_list_entry *list, struct lock_object *lock)
2041{
2042 struct lock_list_entry *lle;
2043 struct lock_instance *instance;
2044 int i;
2045
2046 for (lle = list; lle != NULL; lle = lle->ll_next)
2047 for (i = lle->ll_count - 1; i >= 0; i--) {
2048 instance = &lle->ll_children[i];
2049 if (instance->li_lock == lock)
2050 return (instance);
2051 }
2052 return (NULL);
2053}
2054
2055static void
2056witness_list_lock(struct lock_instance *instance)
2057{
2058 struct lock_object *lock;
2059
2060 lock = instance->li_lock;
2061 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2062 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2063 if (lock->lo_witness->w_name != lock->lo_name)
2064 printf(" (%s)", lock->lo_witness->w_name);
2065 printf(" r = %d (%p) locked @ %s:%d\n",
2066 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
2067 instance->li_line);
2068}
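/*
 * Example of the output format produced above (all values made up):
 *
 *	exclusive sleep mutex mylock (mylock type) r = 0 (0xc4000000)
 *	    locked @ kern/kern_foo.c:123
 */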
2069
2070#ifdef DDB
2071static int
2072witness_thread_has_locks(struct thread *td)
2073{
2074
2075 if (td->td_sleeplocks == NULL)
2076 return (0);
2077 return (td->td_sleeplocks->ll_count != 0);
2078}
2079
2080static int
2081witness_proc_has_locks(struct proc *p)
2082{
2083 struct thread *td;
2084
2085 FOREACH_THREAD_IN_PROC(p, td) {
2086 if (witness_thread_has_locks(td))
2087 return (1);
2088 }
2089 return (0);
2090}
2091#endif
2092
2093int
2094witness_list_locks(struct lock_list_entry **lock_list)
2095{
2096 struct lock_list_entry *lle;
2097 int i, nheld;
2098
2099 nheld = 0;
2100 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2101 for (i = lle->ll_count - 1; i >= 0; i--) {
2102 witness_list_lock(&lle->ll_children[i]);
2103 nheld++;
2104 }
2105 return (nheld);
2106}
2107
2108/*
2109 * This is a bit risky at best. We call this function when we have timed
2110 * out acquiring a spin lock, and we assume that the other CPU is stuck
2111 * with this lock held. So, we go groveling around in the other CPU's
2112 * per-cpu data to try to find the lock instance for this spin lock to
2113 * see when it was last acquired.
2114 */
2115void
2116witness_display_spinlock(struct lock_object *lock, struct thread *owner)
2117{
2118 struct lock_instance *instance;
2119 struct pcpu *pc;
2120
2121 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2122 return;
2123 pc = pcpu_find(owner->td_oncpu);
2124 instance = find_instance(pc->pc_spinlocks, lock);
2125 if (instance != NULL)
2126 witness_list_lock(instance);
2127}
2128
2129void
2130witness_save(struct lock_object *lock, const char **filep, int *linep)
2131{
2132 struct lock_list_entry *lock_list;
2133 struct lock_instance *instance;
2134 struct lock_class *class;
2135
2136 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2137 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2138 return;
2139 class = LOCK_CLASS(lock);
2140 if (class->lc_flags & LC_SLEEPLOCK)
2141 lock_list = curthread->td_sleeplocks;
2142 else {
2143 if (witness_skipspin)
2144 return;
2145 lock_list = PCPU_GET(spinlocks);
2146 }
2147 instance = find_instance(lock_list, lock);
2148 if (instance == NULL)
2149 panic("%s: lock (%s) %s not locked", __func__,
2150 class->lc_name, lock->lo_name);
2151 *filep = instance->li_file;
2152 *linep = instance->li_line;
2153}
2154
2155void
2156witness_restore(struct lock_object *lock, const char *file, int line)
2157{
2158 struct lock_list_entry *lock_list;
2159 struct lock_instance *instance;
2160 struct lock_class *class;
2161
2162 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2163 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2164 return;
2165 class = LOCK_CLASS(lock);
2166 if (class->lc_flags & LC_SLEEPLOCK)
2167 lock_list = curthread->td_sleeplocks;
2168 else {
2169 if (witness_skipspin)
2170 return;
2171 lock_list = PCPU_GET(spinlocks);
2172 }
2173 instance = find_instance(lock_list, lock);
2174 if (instance == NULL)
2175 panic("%s: lock (%s) %s not locked", __func__,
2176 class->lc_name, lock->lo_name);
2177 lock->lo_witness->w_file = file;
2178 lock->lo_witness->w_line = line;
2179 instance->li_file = file;
2180 instance->li_line = line;
2181}
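/*
 * witness_save() and witness_restore() are used as a pair around code
 * that drops and reacquires a lock, so that the original acquisition
 * point is reported afterwards.  A sketch:
 *
 *	const char *file;
 *	int line;
 *
 *	witness_save(&m.lock_object, &file, &line);
 *	mtx_unlock(&m);
 *	...
 *	mtx_lock(&m);
 *	witness_restore(&m.lock_object, file, line);
 */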
2182
2183void
2184witness_assert(struct lock_object *lock, int flags, const char *file, int line)
2185{
2186#ifdef INVARIANT_SUPPORT
2187 struct lock_instance *instance;
2188 struct lock_class *class;
2189
2190 if (lock->lo_witness == NULL || witness_watch < 1 || panicstr != NULL)
2191 return;
2192 class = LOCK_CLASS(lock);
2193 if ((class->lc_flags & LC_SLEEPLOCK) != 0)
2194 instance = find_instance(curthread->td_sleeplocks, lock);
2195 else if ((class->lc_flags & LC_SPINLOCK) != 0)
2196 instance = find_instance(PCPU_GET(spinlocks), lock);
2197 else {
2198 panic("Lock (%s) %s is not sleep or spin!",
2199 class->lc_name, lock->lo_name);
2200 }
2201 file = fixup_filename(file);
2202 switch (flags) {
2203 case LA_UNLOCKED:
2204 if (instance != NULL)
2205 panic("Lock (%s) %s locked @ %s:%d.",
2206 class->lc_name, lock->lo_name, file, line);
2207 break;
2208 case LA_LOCKED:
2209 case LA_LOCKED | LA_RECURSED:
2210 case LA_LOCKED | LA_NOTRECURSED:
2211 case LA_SLOCKED:
2212 case LA_SLOCKED | LA_RECURSED:
2213 case LA_SLOCKED | LA_NOTRECURSED:
2214 case LA_XLOCKED:
2215 case LA_XLOCKED | LA_RECURSED:
2216 case LA_XLOCKED | LA_NOTRECURSED:
2217 if (instance == NULL) {
2218 panic("Lock (%s) %s not locked @ %s:%d.",
2219 class->lc_name, lock->lo_name, file, line);
2220 break;
2221 }
2222 if ((flags & LA_XLOCKED) != 0 &&
2223 (instance->li_flags & LI_EXCLUSIVE) == 0)
2224 panic("Lock (%s) %s not exclusively locked @ %s:%d.",
2225 class->lc_name, lock->lo_name, file, line);
2226 if ((flags & LA_SLOCKED) != 0 &&
2227 (instance->li_flags & LI_EXCLUSIVE) != 0)
2228 panic("Lock (%s) %s exclusively locked @ %s:%d.",
2229 class->lc_name, lock->lo_name, file, line);
2230 if ((flags & LA_RECURSED) != 0 &&
2231 (instance->li_flags & LI_RECURSEMASK) == 0)
2232 panic("Lock (%s) %s not recursed @ %s:%d.",
2233 class->lc_name, lock->lo_name, file, line);
2234 if ((flags & LA_NOTRECURSED) != 0 &&
2235 (instance->li_flags & LI_RECURSEMASK) != 0)
2236 panic("Lock (%s) %s recursed @ %s:%d.",
2237 class->lc_name, lock->lo_name, file, line);
2238 break;
2239 default:
2240 panic("Invalid lock assertion at %s:%d.", file, line);
2241
2242 }
2243#endif /* INVARIANT_SUPPORT */
2244}
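/*
 * A sketch of direct use, combining LA_* flags as in the switch above:
 *
 *	witness_assert(&m.lock_object, LA_XLOCKED | LA_NOTRECURSED,
 *	    __FILE__, __LINE__);
 *
 * This panics unless curthread holds the lock exclusively and without
 * recursion.
 */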
2245
2246static void
2247witness_setflag(struct lock_object *lock, int flag, int set)
2248{
2249 struct lock_list_entry *lock_list;
2250 struct lock_instance *instance;
2251 struct lock_class *class;
2252
2253 if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
2254 return;
2255 class = LOCK_CLASS(lock);
2256 if (class->lc_flags & LC_SLEEPLOCK)
2257 lock_list = curthread->td_sleeplocks;
2258 else {
2259 if (witness_skipspin)
2260 return;
2261 lock_list = PCPU_GET(spinlocks);
2262 }
2263 instance = find_instance(lock_list, lock);
2264 if (instance == NULL)
2265 panic("%s: lock (%s) %s not locked", __func__,
2266 class->lc_name, lock->lo_name);
2267
2268 if (set)
2269 instance->li_flags |= flag;
2270 else
2271 instance->li_flags &= ~flag;
2272}
2273
2274void
2275witness_norelease(struct lock_object *lock)
2276{
2277
2278 witness_setflag(lock, LI_NORELEASE, 1);
2279}
2280
2281void
2282witness_releaseok(struct lock_object *lock)
2283{
2284
2285 witness_setflag(lock, LI_NORELEASE, 0);
2286}
2287
2288#ifdef DDB
2289static void
2290witness_ddb_list(struct thread *td)
2291{
2292
2293 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2294 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2295
2296 if (witness_watch < 1)
2297 return;
2298
2299 witness_list_locks(&td->td_sleeplocks);
2300
2301 /*
2302 * We only handle spinlocks if td == curthread. This is somewhat broken
2303 * if td is currently executing on some other CPU and holds spin locks
2304 * as we won't display those locks. If we had an MI way of getting
2305 * the per-cpu data for a given cpu then we could use
2306 * td->td_oncpu to get the list of spinlocks for this thread
2307 * and "fix" this.
2308 *
2309 * That still wouldn't really fix this unless we locked the scheduler
2310 * lock or stopped the other CPU to make sure it wasn't changing the
2311 * list out from under us. It is probably best to just not try to
2312 * handle threads on other CPUs for now.
2313 */
2314 if (td == curthread && PCPU_GET(spinlocks) != NULL)
2315 witness_list_locks(PCPU_PTR(spinlocks));
2316}
2317
2318DB_SHOW_COMMAND(locks, db_witness_list)
2319{
2320 struct thread *td;
2321
2322 if (have_addr)
2323 td = db_lookup_thread(addr, TRUE);
2324 else
2325 td = kdb_thread;
2326 witness_ddb_list(td);
2327}
2328
2329DB_SHOW_ALL_COMMAND(locks, db_witness_list_all)
2330{
2331 struct thread *td;
2332 struct proc *p;
2333
2334 /*
2335 * It would be nice to list only threads and processes that actually
2336 * hold sleep locks, but that information is currently not exported
2337 * by WITNESS.
2338 */
2339 FOREACH_PROC_IN_SYSTEM(p) {
2340 if (!witness_proc_has_locks(p))
2341 continue;
2342 FOREACH_THREAD_IN_PROC(p, td) {
2343 if (!witness_thread_has_locks(td))
2344 continue;
2345 db_printf("Process %d (%s) thread %p (%d)\n", p->p_pid,
2346 p->p_comm, td, td->td_tid);
2347 witness_ddb_list(td);
2348 }
2349 }
2350}
2351DB_SHOW_ALIAS(alllocks, db_witness_list_all)
2352
2353DB_SHOW_COMMAND(witness, db_witness_display)
2354{
2355
2356 witness_ddb_display(db_printf);
2357}
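/*
 * From the ddb prompt the commands above are entered as, for example:
 *
 *	db> show locks
 *	db> show alllocks
 *	db> show witness
 *
 * "show locks" accepts an optional thread address argument.
 */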
2358#endif
2359
2360static int
2361sysctl_debug_witness_badstacks(SYSCTL_HANDLER_ARGS)
2362{
2363 struct witness_lock_order_data *data1, *data2, *tmp_data1, *tmp_data2;
2364 struct witness *tmp_w1, *tmp_w2, *w1, *w2;
2365 struct sbuf *sb;
2366 u_int w_rmatrix1, w_rmatrix2;
2367 int error, generation, i, j;
2368
2369 tmp_data1 = NULL;
2370 tmp_data2 = NULL;
2371 tmp_w1 = NULL;
2372 tmp_w2 = NULL;
2373 if (witness_watch < 1) {
2374 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2375 return (error);
2376 }
2377 if (witness_cold) {
2378 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2379 return (error);
2380 }
2381 error = 0;
2382 sb = sbuf_new(NULL, NULL, BADSTACK_SBUF_SIZE, SBUF_AUTOEXTEND);
2383 if (sb == NULL)
2384 return (ENOMEM);
2385
2386 /* Allocate and init temporary storage space. */
2387 tmp_w1 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2388 tmp_w2 = malloc(sizeof(struct witness), M_TEMP, M_WAITOK | M_ZERO);
2389 tmp_data1 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2390 M_WAITOK | M_ZERO);
2391 tmp_data2 = malloc(sizeof(struct witness_lock_order_data), M_TEMP,
2392 M_WAITOK | M_ZERO);
2393 stack_zero(&tmp_data1->wlod_stack);
2394 stack_zero(&tmp_data2->wlod_stack);
2395
2396restart:
2397 mtx_lock_spin(&w_mtx);
2398 generation = w_generation;
2399 mtx_unlock_spin(&w_mtx);
2400 sbuf_printf(sb, "Number of known direct relationships is %d\n",
2401 w_lohash.wloh_count);
2402 for (i = 1; i <= w_max_used_index; i++) {
2403 mtx_lock_spin(&w_mtx);
2404 if (generation != w_generation) {
2405 mtx_unlock_spin(&w_mtx);
2406
2407 /* The graph has changed, try again. */
2408 req->oldidx = 0;
2409 sbuf_clear(sb);
2410 goto restart;
2411 }
2412
2413 w1 = &w_data[i];
2414 if (w1->w_reversed == 0) {
2415 mtx_unlock_spin(&w_mtx);
2416 continue;
2417 }
2418
2419 /* Copy w1 locally so we can release the spin lock. */
2420 *tmp_w1 = *w1;
2421 mtx_unlock_spin(&w_mtx);
2422
2423 if (tmp_w1->w_reversed == 0)
2424 continue;
2425 for (j = 1; j <= w_max_used_index; j++) {
2426 if ((w_rmatrix[i][j] & WITNESS_REVERSAL) == 0 || i > j)
2427 continue;
2428
2429 mtx_lock_spin(&w_mtx);
2430 if (generation != w_generation) {
2431 mtx_unlock_spin(&w_mtx);
2432
2433 /* The graph has changed, try again. */
2434 req->oldidx = 0;
2435 sbuf_clear(sb);
2436 goto restart;
2437 }
2438
2439 w2 = &w_data[j];
2440 data1 = witness_lock_order_get(w1, w2);
2441 data2 = witness_lock_order_get(w2, w1);
2442
2443 /*
2444 * Copy information locally so we can release the
2445 * spin lock.
2446 */
2447 *tmp_w2 = *w2;
2448 w_rmatrix1 = (unsigned int)w_rmatrix[i][j];
2449 w_rmatrix2 = (unsigned int)w_rmatrix[j][i];
2450
2451 if (data1) {
2452 stack_zero(&tmp_data1->wlod_stack);
2453 stack_copy(&data1->wlod_stack,
2454 &tmp_data1->wlod_stack);
2455 }
2456 if (data2 && data2 != data1) {
2457 stack_zero(&tmp_data2->wlod_stack);
2458 stack_copy(&data2->wlod_stack,
2459 &tmp_data2->wlod_stack);
2460 }
2461 mtx_unlock_spin(&w_mtx);
2462
2463 sbuf_printf(sb,
2464 "\nLock order reversal between \"%s\"(%s) and \"%s\"(%s)!\n",
2465 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2466 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2467#if 0
2468 sbuf_printf(sb,
2469 "w_rmatrix[%s][%s] == %x, w_rmatrix[%s][%s] == %x\n",
2470 tmp_w1->w_name, tmp_w2->w_name, w_rmatrix1,
2471 tmp_w2->w_name, tmp_w1->w_name, w_rmatrix2);
2472#endif
2473 if (data1) {
2474 sbuf_printf(sb,
2475 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2476 tmp_w1->w_name, tmp_w1->w_class->lc_name,
2477 tmp_w2->w_name, tmp_w2->w_class->lc_name);
2478 stack_sbuf_print(sb, &tmp_data1->wlod_stack);
2479 sbuf_printf(sb, "\n");
2480 }
2481 if (data2 && data2 != data1) {
2482 sbuf_printf(sb,
2483 "Lock order \"%s\"(%s) -> \"%s\"(%s) first seen at:\n",
2484 tmp_w2->w_name, tmp_w2->w_class->lc_name,
2485 tmp_w1->w_name, tmp_w1->w_class->lc_name);
2486 stack_sbuf_print(sb, &tmp_data2->wlod_stack);
2487 sbuf_printf(sb, "\n");
2488 }
2489 }
2490 }
2491 mtx_lock_spin(&w_mtx);
2492 if (generation != w_generation) {
2493 mtx_unlock_spin(&w_mtx);
2494
2495 /*
2496 * The graph changed while we were printing stack data,
2497 * try again.
2498 */
2499 req->oldidx = 0;
2500 sbuf_clear(sb);
2501 goto restart;
2502 }
2503 mtx_unlock_spin(&w_mtx);
2504
2505 /* Free temporary storage space. */
2506 free(tmp_data1, M_TEMP);
2507 free(tmp_data2, M_TEMP);
2508 free(tmp_w1, M_TEMP);
2509 free(tmp_w2, M_TEMP);
2510
2511 sbuf_finish(sb);
2512 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2513 sbuf_delete(sb);
2514
2515 return (error);
2516}
2517
2518static int
2519sysctl_debug_witness_fullgraph(SYSCTL_HANDLER_ARGS)
2520{
2521 struct witness *w;
2522 struct sbuf *sb;
2523 int error;
2524
2525 if (witness_watch < 1) {
2526 error = SYSCTL_OUT(req, w_notrunning, sizeof(w_notrunning));
2527 return (error);
2528 }
2529 if (witness_cold) {
2530 error = SYSCTL_OUT(req, w_stillcold, sizeof(w_stillcold));
2531 return (error);
2532 }
2533 error = 0;
2534 sb = sbuf_new(NULL, NULL, FULLGRAPH_SBUF_SIZE, SBUF_FIXEDLEN);
2535 if (sb == NULL)
2536 return (ENOMEM);
2537 sbuf_printf(sb, "\n");
2538
2539 mtx_lock_spin(&w_mtx);
2540 STAILQ_FOREACH(w, &w_all, w_list)
2541 w->w_displayed = 0;
2542 STAILQ_FOREACH(w, &w_all, w_list)
2543 witness_add_fullgraph(sb, w);
2544 mtx_unlock_spin(&w_mtx);
2545
2546 /*
2547 * While using SBUF_FIXEDLEN, check if the sbuf overflowed.
2548 */
2549 if (sbuf_overflowed(sb)) {
2550 sbuf_delete(sb);
2551 panic("%s: sbuf overflowed, bump FULLGRAPH_SBUF_SIZE value\n",
2552 __func__);
2553 }
2554
2555 /*
2556 * Close the sbuf and return to userland.
2557 */
2558 sbuf_finish(sb);
2559 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2560 sbuf_delete(sb);
2561
2562 return (error);
2563}
2564
2565static int
2566sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
2567{
2568 int error, value;
2569
2570 value = witness_watch;
2571 error = sysctl_handle_int(oidp, &value, 0, req);
2572 if (error != 0 || req->newptr == NULL)
2573 return (error);
2574 if (value > 1 || value < -1 ||
2575 (witness_watch == -1 && value != witness_watch))
2576 return (EINVAL);
2577 witness_watch = value;
2578 return (0);
2579}
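/*
 * This handler backs the debug.witness.watch sysctl.  For example:
 *
 *	sysctl debug.witness.watch=0	(stop order checking)
 *	sysctl debug.witness.watch=-1	(disable witness permanently)
 *
 * As coded above, once witness_watch is -1 any other value is rejected.
 */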
2580
2581static void
2582witness_add_fullgraph(struct sbuf *sb, struct witness *w)
2583{
2584 int i;
2585
2586 if (w->w_displayed != 0 || (w->w_file == NULL && w->w_line == 0))
2587 return;
2588 w->w_displayed = 1;
2589
2590 WITNESS_INDEX_ASSERT(w->w_index);
2591 for (i = 1; i <= w_max_used_index; i++) {
2592 if (w_rmatrix[w->w_index][i] & WITNESS_PARENT) {
2593 sbuf_printf(sb, "\"%s\",\"%s\"\n", w->w_name,
2594 w_data[i].w_name);
2595 witness_add_fullgraph(sb, &w_data[i]);
2596 }
2597 }
2598}
2599
2600/*
2601 * A simple djb2 hash function.  Takes a key pointer and a key size.  If
2602 * size == 0, interprets the key as a string and reads until the null
2603 * terminator.  Otherwise, reads the first size bytes.  Returns a 32-bit
2604 * unsigned hash value computed from the key.
2605 */
2606static uint32_t
2607witness_hash_djb2(const uint8_t *key, uint32_t size)
2608{
2609 unsigned int hash = 5381;
2610 int i;
2611
2612 /* hash = hash * 33 + key[i] */
2613 if (size)
2614 for (i = 0; i < size; i++)
2615 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2616 else
2617 for (i = 0; key[i] != 0; i++)
2618 hash = ((hash << 5) + hash) + (unsigned int)key[i];
2619
2620 return (hash);
2621}
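/*
 * Worked example (arithmetic only): hashing the string "ab" computes
 * hash = 5381, then 5381 * 33 + 'a' (97) = 177670, then
 * 177670 * 33 + 'b' (98) = 5863208; callers then reduce the result
 * modulo their table size.
 */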
2622
2623
2624/*
2625 * Initializes the two witness hash tables. Called exactly once from
2626 * witness_initialize().
2627 */
2628static void
2629witness_init_hash_tables(void)
2630{
2631 int i;
2632
2633 MPASS(witness_cold);
2634
2635 /* Initialize the hash tables. */
2636 for (i = 0; i < WITNESS_HASH_SIZE; i++)
2637 w_hash.wh_array[i] = NULL;
2638
2639 w_hash.wh_size = WITNESS_HASH_SIZE;
2640 w_hash.wh_count = 0;
2641
2642 /* Initialize the lock order data hash. */
2643 w_lofree = NULL;
2644 for (i = 0; i < WITNESS_LO_DATA_COUNT; i++) {
2645 memset(&w_lodata[i], 0, sizeof(w_lodata[i]));
2646 w_lodata[i].wlod_next = w_lofree;
2647 w_lofree = &w_lodata[i];
2648 }
2649 w_lohash.wloh_size = WITNESS_LO_HASH_SIZE;
2650 w_lohash.wloh_count = 0;
2651 for (i = 0; i < WITNESS_LO_HASH_SIZE; i++)
2652 w_lohash.wloh_array[i] = NULL;
2653}
2654
2655static struct witness *
2656witness_hash_get(const char *key)
2657{
2658 struct witness *w;
2659 uint32_t hash;
2660
2661 MPASS(key != NULL);
2662 if (witness_cold == 0)
2663 mtx_assert(&w_mtx, MA_OWNED);
2664 hash = witness_hash_djb2(key, 0) % w_hash.wh_size;
2665 w = w_hash.wh_array[hash];
2666 while (w != NULL) {
2667 if (strcmp(w->w_name, key) == 0)
2668 goto out;
2669 w = w->w_hash_next;
2670 }
2671
2672out:
2673 return (w);
2674}
2675
2676static void
2677witness_hash_put(struct witness *w)
2678{
2679 uint32_t hash;
2680
2681 MPASS(w != NULL);
2682 MPASS(w->w_name != NULL);
2683 if (witness_cold == 0)
2684 mtx_assert(&w_mtx, MA_OWNED);
2685 KASSERT(witness_hash_get(w->w_name) == NULL,
2686 ("%s: trying to add a hash entry that already exists!", __func__));
2687 KASSERT(w->w_hash_next == NULL,
2688 ("%s: w->w_hash_next != NULL", __func__));
2689
2690 hash = witness_hash_djb2(w->w_name, 0) % w_hash.wh_size;
2691 w->w_hash_next = w_hash.wh_array[hash];
2692 w_hash.wh_array[hash] = w;
2693 w_hash.wh_count++;
2694}
2695
2696
2697static struct witness_lock_order_data *
2698witness_lock_order_get(struct witness *parent, struct witness *child)
2699{
2700 struct witness_lock_order_data *data = NULL;
2701 struct witness_lock_order_key key;
2702 unsigned int hash;
2703
2704 MPASS(parent != NULL && child != NULL);
2705 key.from = parent->w_index;
2706 key.to = child->w_index;
2707 WITNESS_INDEX_ASSERT(key.from);
2708 WITNESS_INDEX_ASSERT(key.to);
2709 if ((w_rmatrix[parent->w_index][child->w_index]
2710 & WITNESS_LOCK_ORDER_KNOWN) == 0)
2711 goto out;
2712
2713 hash = witness_hash_djb2((const char*)&key,
2714 sizeof(key)) % w_lohash.wloh_size;
2715 data = w_lohash.wloh_array[hash];
2716 while (data != NULL) {
2717 if (witness_lock_order_key_equal(&data->wlod_key, &key))
2718 break;
2719 data = data->wlod_next;
2720 }
2721
2722out:
2723 return (data);
2724}
2725
2726/*
2727 * Verify that parent and child have a known relationship, are not the same,
2728 * and child is actually a child of parent. This is done without w_mtx
2729 * to avoid contention in the common case.
2730 */
2731static int
2732witness_lock_order_check(struct witness *parent, struct witness *child)
2733{
2734
2735 if (parent != child &&
2736 w_rmatrix[parent->w_index][child->w_index]
2737 & WITNESS_LOCK_ORDER_KNOWN &&
2738 isitmychild(parent, child))
2739 return (1);
2740
2741 return (0);
2742}
2743
2744static int
2745witness_lock_order_add(struct witness *parent, struct witness *child)
2746{
2747 struct witness_lock_order_data *data = NULL;
2748 struct witness_lock_order_key key;
2749 unsigned int hash;
2750
2751 MPASS(parent != NULL && child != NULL);
2752 key.from = parent->w_index;
2753 key.to = child->w_index;
2754 WITNESS_INDEX_ASSERT(key.from);
2755 WITNESS_INDEX_ASSERT(key.to);
2756 if (w_rmatrix[parent->w_index][child->w_index]
2757 & WITNESS_LOCK_ORDER_KNOWN)
2758 return (1);
2759
2760 hash = witness_hash_djb2((const char*)&key,
2761 sizeof(key)) % w_lohash.wloh_size;
2762 w_rmatrix[parent->w_index][child->w_index] |= WITNESS_LOCK_ORDER_KNOWN;
2763 data = w_lofree;
2764 if (data == NULL)
2765 return (0);
2766 w_lofree = data->wlod_next;
2767 data->wlod_next = w_lohash.wloh_array[hash];
2768 data->wlod_key = key;
2769 w_lohash.wloh_array[hash] = data;
2770 w_lohash.wloh_count++;
2771 stack_zero(&data->wlod_stack);
2772 stack_save(&data->wlod_stack);
2773 return (1);
2774}
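/*
 * The stack saved here is what sysctl_debug_witness_badstacks() later
 * reports as the point where each ordering was first seen.  Once
 * w_lofree is exhausted the ordering is still marked known in
 * w_rmatrix, but without an associated stack trace.
 */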
2775
2776/* Call this whenever the structure of the witness graph changes. */
2777static void
2778witness_increment_graph_generation(void)
2779{
2780
2781 if (witness_cold == 0)
2782 mtx_assert(&w_mtx, MA_OWNED);
2783 w_generation++;
2784}
2785
2786#ifdef KDB
2787static void
2788_witness_debugger(int cond, const char *msg)
2789{
2790
2791 if (witness_trace && cond)
2792 kdb_backtrace();
2793 if (witness_kdb && cond)
2794 kdb_enter(KDB_WHY_WITNESS, msg);
2795}
2796#endif