subr_witness.c: FreeBSD head/sys/kern, revision 112993 (updated from revision 112562)
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_witness.c 112993 2003-04-02 23:53:30Z peter $
 */

/*
 * Implementation of the `witness' lock verifier.  Originally implemented for
 * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
 * classes in FreeBSD.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *      religious faith or conviction <the heroic witness to divine
 *      life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

/*
 * Special rules concerning Giant and lock orders:
 *
 * 1) Giant must be acquired before any other mutexes.  Stated another way,
 *    no other mutex may be held when Giant is acquired.
 *
 * 2) Giant must be released when blocking on a sleepable lock.
 *
 * This rule is less obvious, but is a result of Giant providing the same
 * semantics as spl().  Basically, when a thread sleeps, it must release
 * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
 * 2).
 *
 * 3) Giant may be acquired before or after sleepable locks.
 *
 * This rule is also not quite as obvious.  Giant may be acquired after
 * a sleepable lock because it is a non-sleepable lock and non-sleepable
 * locks may always be acquired while holding a sleepable lock.  The second
 * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
 * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
 * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
 * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
 * execute.  Thus, acquiring Giant both before and after a sleepable lock
 * will not result in a lock order reversal.
 */
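
/*
 * A minimal sketch of rule 3) in code form (hypothetical lock names, not
 * part of this file).  Either ordering is accepted by witness:
 *
 *	sx_xlock(&foo_sx);		// sleepable lock first
 *	mtx_lock(&Giant);		// Giant after a sleepable lock: ok
 *	mtx_unlock(&Giant);
 *	sx_xunlock(&foo_sx);
 *
 *	mtx_lock(&Giant);		// Giant first
 *	sx_xlock(&foo_sx);		// sleepable lock after Giant: also ok,
 *					// since Giant is dropped while sleeping
 *	sx_xunlock(&foo_sx);
 *	mtx_unlock(&Giant);
 */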

#include "opt_ddb.h"
#include "opt_witness.h"
#ifdef __i386__
#include "opt_swtch.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <ddb/ddb.h>

#include <machine/stdarg.h>

/* Define this to check for blessed mutexes */
#undef BLESSING

#define WITNESS_COUNT 200
#define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
/*
 * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
 * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
 * probably be safe for the most part, but it's still a SWAG.
 */
#define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)
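
/*
 * For example, on a kernel built with MAXCPU of 32 (an illustrative value),
 * this reserves (32 + 1024) * 2 = 2112 lock list entries.
 */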

#define WITNESS_NCHILDREN 6

struct witness_child_list_entry;

struct witness {
	const char	*w_name;
	struct lock_class *w_class;
	STAILQ_ENTRY(witness) w_list;		/* List of all witnesses. */
	STAILQ_ENTRY(witness) w_typelist;	/* Witnesses of a type. */
	struct witness_child_list_entry *w_children;	/* Great evilness... */
	const char	*w_file;
	int	w_line;
	u_int	w_level;
	u_int	w_refcount;
	u_char	w_Giant_squawked:1;
	u_char	w_other_squawked:1;
	u_char	w_same_squawked:1;
	u_char	w_displayed:1;
};

struct witness_child_list_entry {
	struct witness_child_list_entry *wcl_next;
	struct witness *wcl_children[WITNESS_NCHILDREN];
	u_int		wcl_count;
};
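
/*
 * Note: w_children above is effectively an unrolled linked list: each
 * witness_child_list_entry packs up to WITNESS_NCHILDREN child pointers
 * and chains to the next entry through wcl_next.
 */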

STAILQ_HEAD(witness_list, witness);

#ifdef BLESSING
struct witness_blessed {
	const char	*b_lock1;
	const char	*b_lock2;
};
#endif

struct witness_order_list_entry {
	const char		*w_name;
	struct lock_class	*w_class;
};

#ifdef BLESSING
static int	blessed(struct witness *, struct witness *);
#endif
static int	depart(struct witness *w);
static struct	witness *enroll(const char *description,
				struct lock_class *lock_class);
static int	insertchild(struct witness *parent, struct witness *child);
static int	isitmychild(struct witness *parent, struct witness *child);
static int	isitmydescendant(struct witness *parent, struct witness *child);
static int	itismychild(struct witness *parent, struct witness *child);
static int	rebalancetree(struct witness_list *list);
static void	removechild(struct witness *parent, struct witness *child);
static int	reparentchildren(struct witness *newparent,
		    struct witness *oldparent);
static int	sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
static void	witness_displaydescendants(void(*)(const char *fmt, ...),
					   struct witness *, int indent);
static const char *fixup_filename(const char *file);
static void	witness_leveldescendents(struct witness *parent, int level);
static void	witness_levelall(void);
static struct	witness *witness_get(void);
static void	witness_free(struct witness *m);
static struct	witness_child_list_entry *witness_child_get(void);
static void	witness_child_free(struct witness_child_list_entry *wcl);
static struct	lock_list_entry *witness_lock_list_get(void);
static void	witness_lock_list_free(struct lock_list_entry *lle);
static struct	lock_instance *find_instance(struct lock_list_entry *lock_list,
					     struct lock_object *lock);
static void	witness_list_lock(struct lock_instance *instance);
#ifdef DDB
static void	witness_list(struct thread *td);
static void	witness_display_list(void(*prnt)(const char *fmt, ...),
				     struct witness_list *list);
static void	witness_display(void(*)(const char *fmt, ...));
#endif

MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");

/*
 * If set to 0, witness is disabled.  If set to 1, witness performs full lock
 * order checking for all locks.  If set to 2 or higher, then witness skips
 * the full lock order check if the lock being acquired is at a higher level
 * (i.e. farther down in the tree) than the current lock.  This last mode is
 * somewhat experimental and not considered fully safe.  At runtime, this
 * value may be set to 0 to turn off witness.  witness is not allowed to be
 * turned on once it is turned off, however.
 */
static int witness_watch = 1;
TUNABLE_INT("debug.witness_watch", &witness_watch);
SYSCTL_PROC(_debug, OID_AUTO, witness_watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
    sysctl_debug_witness_watch, "I", "witness is watching lock operations");
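
/*
 * Usage sketch (not part of this file): witness may be switched off at
 * runtime but never back on, as enforced by sysctl_debug_witness_watch()
 * below:
 *
 *	sysctl debug.witness_watch=0	# accepted
 *	sysctl debug.witness_watch=1	# rejected (EINVAL) once disabled
 */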

#ifdef DDB
/*
 * When DDB is enabled and witness_ddb is set to 1, it will cause the system to
 * drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
#ifdef WITNESS_DDB
int	witness_ddb = 1;
#else
int	witness_ddb = 0;
#endif
TUNABLE_INT("debug.witness_ddb", &witness_ddb);
SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, "");

/*
 * When DDB is enabled and witness_trace is set to 1, it will cause the system
 * to print a stack trace when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
 */
int	witness_trace = 1;
TUNABLE_INT("debug.witness_trace", &witness_trace);
SYSCTL_INT(_debug, OID_AUTO, witness_trace, CTLFLAG_RW, &witness_trace, 0, "");
#endif /* DDB */

#ifdef WITNESS_SKIPSPIN
int	witness_skipspin = 1;
#else
int	witness_skipspin = 0;
#endif
TUNABLE_INT("debug.witness_skipspin", &witness_skipspin);
SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
    "");

static struct mtx w_mtx;
static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
static struct witness_child_list_entry *w_child_free = NULL;
static struct lock_list_entry *w_lock_list_free = NULL;

static struct witness w_data[WITNESS_COUNT];
static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];

static struct witness_order_list_entry order_lists[] = {
	{ "proctree", &lock_class_sx },
	{ "allproc", &lock_class_sx },
	{ "Giant", &lock_class_mtx_sleep },
	{ "filedesc structure", &lock_class_mtx_sleep },
	{ "pipe mutex", &lock_class_mtx_sleep },
	{ "sigio lock", &lock_class_mtx_sleep },
	{ "process group", &lock_class_mtx_sleep },
	{ "process lock", &lock_class_mtx_sleep },
	{ "session", &lock_class_mtx_sleep },
	{ "uidinfo hash", &lock_class_mtx_sleep },
	{ "uidinfo struct", &lock_class_mtx_sleep },
	{ NULL, NULL },
	/*
	 * spin locks
	 */
#ifdef SMP
	{ "ap boot", &lock_class_mtx_spin },
#ifdef __i386__
	{ "com", &lock_class_mtx_spin },
#endif
#endif
	{ "sio", &lock_class_mtx_spin },
#ifdef __i386__
	{ "cy", &lock_class_mtx_spin },
#endif
	{ "sabtty", &lock_class_mtx_spin },
	{ "zstty", &lock_class_mtx_spin },
	{ "ng_node", &lock_class_mtx_spin },
	{ "ng_worklist", &lock_class_mtx_spin },
	{ "ithread table lock", &lock_class_mtx_spin },
	{ "sched lock", &lock_class_mtx_spin },
	{ "callout", &lock_class_mtx_spin },
	/*
	 * leaf locks
	 */
	{ "allpmaps", &lock_class_mtx_spin },
	{ "vm page queue free mutex", &lock_class_mtx_spin },
	{ "icu", &lock_class_mtx_spin },
#ifdef SMP
	{ "smp rendezvous", &lock_class_mtx_spin },
#if defined(__i386__) && defined(APIC_IO)
	{ "tlb", &lock_class_mtx_spin },
#endif
#if defined(__i386__) && defined(LAZY_SWITCH)
	{ "lazypmap", &lock_class_mtx_spin },
#endif
#ifdef __sparc64__
	{ "ipi", &lock_class_mtx_spin },
#endif
#endif
	{ "clk", &lock_class_mtx_spin },
	{ "mutex profiling lock", &lock_class_mtx_spin },
	{ "kse zombie lock", &lock_class_mtx_spin },
	{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
	{ "MCA spin lock", &lock_class_mtx_spin },
#endif
#ifdef __i386__
	{ "pcicfg", &lock_class_mtx_spin },
#endif
	{ NULL, NULL },
	{ NULL, NULL }
};

#ifdef BLESSING
/*
 * Pairs of locks which have been blessed
 * Don't complain about order problems with blessed locks
 */
static struct witness_blessed blessed_list[] = {
};
static int blessed_count =
	sizeof(blessed_list) / sizeof(struct witness_blessed);
#endif
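
/*
 * A hypothetical example entry (the stock list above is empty): blessing
 * "lockA"/"lockB" would suppress reversal reports between those two names:
 *
 *	static struct witness_blessed blessed_list[] = {
 *		{ "lockA", "lockB" },
 *	};
 */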

/*
 * List of all locks in the system.
 */
TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);

static struct mtx all_mtx = {
	{ &lock_class_mtx_sleep,	/* mtx_object.lo_class */
	  "All locks list",		/* mtx_object.lo_name */
	  "All locks list",		/* mtx_object.lo_type */
	  LO_INITIALIZED,		/* mtx_object.lo_flags */
	  { NULL, NULL },		/* mtx_object.lo_list */
	  NULL },			/* mtx_object.lo_witness */
	MTX_UNOWNED, 0,			/* mtx_lock, mtx_recurse */
	TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
	{ NULL, NULL }			/* mtx_contested */
};

/*
 * This global is set to 0 once it becomes safe to use the witness code.
 */
static int witness_cold = 1;

/*
 * Global variables for bookkeeping.
 */
static int lock_cur_cnt;
static int lock_max_cnt;

/*
 * The WITNESS-enabled diagnostic code.
 */
static void
witness_initialize(void *dummy __unused)
{
	struct lock_object *lock;
	struct witness_order_list_entry *order;
	struct witness *w, *w1;
	int i;

	/*
	 * We have to release Giant before initializing its witness
	 * structure so that WITNESS doesn't get confused.
	 */
	mtx_unlock(&Giant);
	mtx_assert(&Giant, MA_NOTOWNED);

	CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
	TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
	mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
	    MTX_NOWITNESS);
	for (i = 0; i < WITNESS_COUNT; i++)
		witness_free(&w_data[i]);
	for (i = 0; i < WITNESS_CHILDCOUNT; i++)
		witness_child_free(&w_childdata[i]);
	for (i = 0; i < LOCK_CHILDCOUNT; i++)
		witness_lock_list_free(&w_locklistdata[i]);

	/* First add in all the specified order lists. */
	for (order = order_lists; order->w_name != NULL; order++) {
		w = enroll(order->w_name, order->w_class);
		if (w == NULL)
			continue;
		w->w_file = "order list";
		for (order++; order->w_name != NULL; order++) {
			w1 = enroll(order->w_name, order->w_class);
			if (w1 == NULL)
				continue;
			w1->w_file = "order list";
			if (!itismychild(w, w1))
				panic("Not enough memory for static orders!");
			w = w1;
		}
	}

	/* Iterate through all locks and add them to witness. */
	mtx_lock(&all_mtx);
	TAILQ_FOREACH(lock, &all_locks, lo_list) {
		if (lock->lo_flags & LO_WITNESS)
			lock->lo_witness = enroll(lock->lo_type,
			    lock->lo_class);
		else
			lock->lo_witness = NULL;
	}
	mtx_unlock(&all_mtx);

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);

	mtx_lock(&Giant);
}
SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
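
/*
 * A note on how witness_initialize() consumes order_lists above: within one
 * run of entries, each name is enrolled as a child of its predecessor, so a
 * run such as
 *
 *	{ "Giant", &lock_class_mtx_sleep },
 *	{ "process lock", &lock_class_mtx_sleep },
 *	{ NULL, NULL },
 *
 * records that Giant must be acquired before "process lock".  A NULL entry
 * terminates the current chain, and the following entry (if any) starts a
 * new, independent chain.
 */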

static int
sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = witness_watch;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	error = suser(req->td);
	if (error != 0)
		return (error);
	if (value == witness_watch)
		return (0);
	if (value != 0)
		return (EINVAL);
	witness_watch = 0;
	return (0);
}

void
witness_init(struct lock_object *lock)
{
	struct lock_class *class;

	class = lock->lo_class;
	if (lock->lo_flags & LO_INITIALIZED)
		panic("%s: lock (%s) %s is already initialized", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
	    (class->lc_flags & LC_RECURSABLE) == 0)
		panic("%s: lock (%s) %s can not be recursable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
	    (class->lc_flags & LC_SLEEPABLE) == 0)
		panic("%s: lock (%s) %s can not be sleepable", __func__,
		    class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
	    (class->lc_flags & LC_UPGRADABLE) == 0)
		panic("%s: lock (%s) %s can not be upgradable", __func__,
		    class->lc_name, lock->lo_name);

	mtx_lock(&all_mtx);
	TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
	lock->lo_flags |= LO_INITIALIZED;
	lock_cur_cnt++;
	if (lock_cur_cnt > lock_max_cnt)
		lock_max_cnt = lock_cur_cnt;
	mtx_unlock(&all_mtx);
	if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
	    (lock->lo_flags & LO_WITNESS) != 0)
		lock->lo_witness = enroll(lock->lo_type, class);
	else
		lock->lo_witness = NULL;
}

void
witness_destroy(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold)
		panic("lock (%s) %s destroyed while witness_cold",
		    lock->lo_class->lc_name, lock->lo_name);
	if ((lock->lo_flags & LO_INITIALIZED) == 0)
		panic("%s: lock (%s) %s is not initialized", __func__,
		    lock->lo_class->lc_name, lock->lo_name);

	/* XXX: need to verify that no one holds the lock */
	w = lock->lo_witness;
	if (w != NULL) {
		mtx_lock_spin(&w_mtx);
		MPASS(w->w_refcount > 0);
		w->w_refcount--;

		/*
		 * Lock is already released if we have an allocation failure
		 * and depart() fails.
		 */
		if (w->w_refcount != 0 || depart(w))
			mtx_unlock_spin(&w_mtx);
	}

	mtx_lock(&all_mtx);
	lock_cur_cnt--;
	TAILQ_REMOVE(&all_locks, lock, lo_list);
	lock->lo_flags &= ~LO_INITIALIZED;
	mtx_unlock(&all_mtx);
}

#ifdef DDB
static void
witness_display_list(void(*prnt)(const char *fmt, ...),
		     struct witness_list *list)
{
	struct witness *w;

	STAILQ_FOREACH(w, list, w_typelist) {
		if (w->w_file == NULL || w->w_level > 0)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w, 0);
	}
}

static void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	witness_levelall();

	/* Clear all the displayed flags. */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_displayed = 0;
	}

	/*
	 * First, handle sleep locks which have been acquired at least
	 * once.
	 */
	prnt("Sleep locks:\n");
	witness_display_list(prnt, &w_sleep);

	/*
	 * Now do spin locks which have been acquired at least once.
	 */
	prnt("\nSpin locks:\n");
	witness_display_list(prnt, &w_spin);

	/*
	 * Finally, any locks which have not been acquired yet.
	 */
	prnt("\nLocks which were never acquired:\n");
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_file != NULL || w->w_refcount == 0)
			continue;
		prnt("%s\n", w->w_name);
	}
}
#endif /* DDB */

/* Trim useless garbage from filenames. */
static const char *
fixup_filename(const char *file)
{

	if (file == NULL)
		return (NULL);
	while (strncmp(file, "../", 3) == 0)
		file += 3;
	return (file);
}
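
/*
 * For example (illustrative input), "../../kern/kern_mutex.c" is reported
 * as "kern/kern_mutex.c".
 */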

void
witness_lock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *lock1, *lock2;
	struct lock_class *class;
	struct witness *w, *w1;
	struct thread *td;
	int i, j;
#ifdef DDB
	int go_into_ddb = 0;
#endif

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	w = lock->lo_witness;
	class = lock->lo_class;
	td = curthread;
	file = fixup_filename(file);

	if (class->lc_flags & LC_SLEEPLOCK) {
		/*
		 * Since spin locks include a critical section, this check
		 * implicitly enforces a lock order of all sleep locks before
		 * all spin locks.
		 */
		if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
			panic("blockable sleep lock (%s) %s @ %s:%d",
			    class->lc_name, lock->lo_name, file, line);
		lock_list = &td->td_sleeplocks;
	} else
		lock_list = PCPU_PTR(spinlocks);

	/*
	 * Is this the first lock acquired?  If so, then no order checking
	 * is needed.
	 */
	if (*lock_list == NULL)
		goto out;

	/*
	 * Check to see if we are recursing on a lock we already own.
	 */
	lock1 = find_instance(*lock_list, lock);
	if (lock1 != NULL) {
		if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
		    (flags & LOP_EXCLUSIVE) == 0) {
			printf("shared lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while exclusively locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("share->excl");
		}
		if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
		    (flags & LOP_EXCLUSIVE) != 0) {
			printf("exclusive lock of (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("while share locked from %s:%d\n",
			    lock1->li_file, lock1->li_line);
			panic("excl->share");
		}
		/* The recursion count lives in the low bits of li_flags. */
		lock1->li_flags++;
		if ((lock->lo_flags & LO_RECURSABLE) == 0) {
			printf(
			"recursed on non-recursive lock (%s) %s @ %s:%d\n",
			    class->lc_name, lock->lo_name, file, line);
			printf("first acquired @ %s:%d\n", lock1->li_file,
			    lock1->li_line);
			panic("recurse");
		}
		CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
		    td->td_proc->p_pid, lock->lo_name,
		    lock1->li_flags & LI_RECURSEMASK);
		lock1->li_file = file;
		lock1->li_line = line;
		return;
	}

	/*
	 * Try locks do not block if they fail to acquire the lock, thus
	 * there is no danger of deadlocks or of switching while holding a
	 * spin lock if we acquire a lock via a try operation.
	 */
	if (flags & LOP_TRYLOCK)
		goto out;

	/*
	 * Check for duplicate locks of the same type.  Note that we only
	 * have to check for this on the last lock we just acquired.  Any
	 * other cases will be caught as lock order violations.
	 */
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	w1 = lock1->li_lock->lo_witness;
	if (w1 == w) {
		if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
			goto out;
		w->w_same_squawked = 1;
		printf("acquiring duplicate lock of same type: \"%s\"\n",
		    lock->lo_type);
		printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
		    lock1->li_file, lock1->li_line);
		printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
#ifdef DDB
		go_into_ddb = 1;
#endif
		goto out;
	}
	MPASS(!mtx_owned(&w_mtx));
	mtx_lock_spin(&w_mtx);
	/*
	 * If we have a known higher number just say ok
	 */
	if (witness_watch > 1 && w->w_level > w1->w_level) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	/*
	 * If we know that the lock we are acquiring comes after
	 * the lock we most recently acquired in the lock order tree,
	 * then there is no need for any further checks.
	 */
	if (isitmydescendant(w1, w)) {
		mtx_unlock_spin(&w_mtx);
		goto out;
	}
	for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
		for (i = lle->ll_count - 1; i >= 0; i--, j++) {

			MPASS(j < WITNESS_COUNT);
			lock1 = &lle->ll_children[i];
			w1 = lock1->li_lock->lo_witness;

			/*
			 * If this lock doesn't undergo witness checking,
			 * then skip it.
			 */
			if (w1 == NULL) {
				KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
				    ("lock missing witness structure"));
				continue;
			}
			/*
			 * If we are locking Giant and this is a sleepable
			 * lock, then skip it.
			 */
			if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * is Giant, then skip it.
			 */
			if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			/*
			 * If we are locking a sleepable lock and this lock
			 * isn't sleepable, we want to treat it as a lock
			 * order violation to enforce a general lock order of
			 * sleepable locks before non-sleepable locks.
			 */
			if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
				/*
				 * Check the lock order hierarchy for a
				 * reversal.
				 */
				if (!isitmydescendant(w, w1))
					continue;
			/*
			 * We have a lock order violation, check to see if it
			 * is allowed or has already been yelled about.
			 */
			mtx_unlock_spin(&w_mtx);
#ifdef BLESSING
			if (blessed(w, w1))
				goto out;
#endif
			if (lock1->li_lock == &Giant.mtx_object) {
				if (w1->w_Giant_squawked)
					goto out;
				else
					w1->w_Giant_squawked = 1;
			} else {
				if (w1->w_other_squawked)
					goto out;
				else
					w1->w_other_squawked = 1;
			}
			/*
			 * Ok, yell about it.
			 */
			printf("lock order reversal\n");
			/*
			 * Try to locate an earlier lock with
			 * witness w in our list.
			 */
			do {
				lock2 = &lle->ll_children[i];
				MPASS(lock2->li_lock != NULL);
				if (lock2->li_lock->lo_witness == w)
					break;
				i--;
				if (i == 0 && lle->ll_next != NULL) {
					lle = lle->ll_next;
					i = lle->ll_count - 1;
					MPASS(i >= 0 && i < LOCK_NCHILDREN);
				}
			} while (i >= 0);
			if (i < 0) {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			} else {
				printf(" 1st %p %s (%s) @ %s:%d\n",
				    lock2->li_lock, lock2->li_lock->lo_name,
				    lock2->li_lock->lo_type, lock2->li_file,
				    lock2->li_line);
				printf(" 2nd %p %s (%s) @ %s:%d\n",
				    lock1->li_lock, lock1->li_lock->lo_name,
				    lock1->li_lock->lo_type, lock1->li_file,
				    lock1->li_line);
				printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
				    lock->lo_name, lock->lo_type, file, line);
			}
#ifdef DDB
			go_into_ddb = 1;
#endif
			goto out;
		}
	}
	lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
	/*
	 * Don't build a new relationship between a sleepable lock and
	 * Giant if it is the wrong direction.  The real lock order is that
	 * sleepable locks come before Giant.
	 */
	if (!(lock1->li_lock == &Giant.mtx_object &&
	    (lock->lo_flags & LO_SLEEPABLE) != 0)) {
		CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
		    lock->lo_type, lock1->li_lock->lo_type);
		if (!itismychild(lock1->li_lock->lo_witness, w))
			/* Witness is dead. */
			return;
	}
	mtx_unlock_spin(&w_mtx);

out:
#ifdef DDB
	if (go_into_ddb) {
		if (witness_trace)
			backtrace();
		if (witness_ddb)
			Debugger(__func__);
	}
#endif
	w->w_file = file;
	w->w_line = line;

	lle = *lock_list;
	if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
		lle = witness_lock_list_get();
		if (lle == NULL)
			return;
		lle->ll_next = *lock_list;
		CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
		    td->td_proc->p_pid, lle);
		*lock_list = lle;
	}
	lock1 = &lle->ll_children[lle->ll_count++];
	lock1->li_lock = lock;
	lock1->li_line = line;
	lock1->li_file = file;
	if ((flags & LOP_EXCLUSIVE) != 0)
		lock1->li_flags = LI_EXCLUSIVE;
	else
		lock1->li_flags = 0;
	CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
	    td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
}

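/*
 * An example of the report produced above, with hypothetical lock names
 * and addresses:
 *
 *	lock order reversal
 *	 1st 0xc65a6c20 bar (bar) @ dev/foo/foo.c:123
 *	 2nd 0xc65a6d40 foo (foo) @ dev/foo/foo.c:456
 */
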
void
witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((flags & LOP_TRYLOCK) == 0)
		panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
		    lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("upgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) != 0)
		panic("upgrade of exclusive lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags |= LI_EXCLUSIVE;
}

void
witness_downgrade(struct lock_object *lock, int flags, const char *file,
    int line)
{
	struct lock_instance *instance;
	struct lock_class *class;

	KASSERT(!witness_cold, ("%s: witness_cold", __func__));
	if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
		return;
	class = lock->lo_class;
	file = fixup_filename(file);
	if ((lock->lo_flags & LO_UPGRADABLE) == 0)
		panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
		panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	instance = find_instance(curthread->td_sleeplocks, lock);
	if (instance == NULL)
		panic("downgrade of unlocked lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_EXCLUSIVE) == 0)
		panic("downgrade of shared lock (%s) %s @ %s:%d",
		    class->lc_name, lock->lo_name, file, line);
	if ((instance->li_flags & LI_RECURSEMASK) != 0)
		panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
		    class->lc_name, lock->lo_name,
		    instance->li_flags & LI_RECURSEMASK, file, line);
	instance->li_flags &= ~LI_EXCLUSIVE;
}

void
witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
{
	struct lock_list_entry **lock_list, *lle;
	struct lock_instance *instance;
	struct lock_class *class;
	struct thread *td;
	register_t s;
	int i, j;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
	    panicstr != NULL)
		return;
	td = curthread;
	class = lock->lo_class;
	file = fixup_filename(file);
	if (class->lc_flags & LC_SLEEPLOCK)
		lock_list = &td->td_sleeplocks;
	else
		lock_list = PCPU_PTR(spinlocks);
	for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
		for (i = 0; i < (*lock_list)->ll_count; i++) {
			instance = &(*lock_list)->ll_children[i];
			if (instance->li_lock == lock) {
				if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
				    (flags & LOP_EXCLUSIVE) == 0) {
					printf(
					"shared unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while exclusively locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("excl->ushare");
				}
				if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
				    (flags & LOP_EXCLUSIVE) != 0) {
					printf(
					"exclusive unlock of (%s) %s @ %s:%d\n",
					    class->lc_name, lock->lo_name,
					    file, line);
					printf(
					"while share locked from %s:%d\n",
					    instance->li_file,
					    instance->li_line);
					panic("share->uexcl");
				}
				/* If we are recursed, unrecurse. */
				if ((instance->li_flags & LI_RECURSEMASK) > 0) {
					CTR4(KTR_WITNESS,
				"%s: pid %d unrecursed on %s r=%d", __func__,
					    td->td_proc->p_pid,
					    instance->li_lock->lo_name,
					    instance->li_flags);
					instance->li_flags--;
					return;
				}
				s = intr_disable();
				CTR4(KTR_WITNESS,
				"%s: pid %d removed %s from lle[%d]", __func__,
				    td->td_proc->p_pid,
				    instance->li_lock->lo_name,
				    (*lock_list)->ll_count - 1);
				for (j = i; j < (*lock_list)->ll_count - 1; j++)
					(*lock_list)->ll_children[j] =
					    (*lock_list)->ll_children[j + 1];
				(*lock_list)->ll_count--;
				intr_restore(s);
				if ((*lock_list)->ll_count == 0) {
					lle = *lock_list;
					*lock_list = lle->ll_next;
					CTR3(KTR_WITNESS,
					"%s: pid %d removed lle %p", __func__,
					    td->td_proc->p_pid, lle);
					witness_lock_list_free(lle);
				}
				return;
			}
		}
	panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
	    file, line);
}

/*
 * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 * exempt Giant and sleepable locks from the checks as well.  If any
 * non-exempt locks are held, then a supplied message is printed to the
 * console along with a list of the offending locks.  If indicated in the
 * flags then a failure results in a panic as well.
 */
int
witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
{
	struct lock_list_entry *lle;
	struct lock_instance *lock1;
	struct thread *td;
	va_list ap;
	int i, n;

	if (witness_cold || witness_watch == 0 || panicstr != NULL)
		return (0);
	n = 0;
	td = curthread;
	for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
		for (i = lle->ll_count - 1; i >= 0; i--) {
			lock1 = &lle->ll_children[i];
			if (lock1->li_lock == lock)
				continue;
			if (flags & WARN_GIANTOK &&
			    lock1->li_lock == &Giant.mtx_object)
				continue;
			if (flags & WARN_SLEEPOK &&
			    (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
				continue;
			if (n == 0) {
				va_start(ap, fmt);
				vprintf(fmt, ap);
				va_end(ap);
				printf(" with the following");
				if (flags & WARN_SLEEPOK)
					printf(" non-sleepable");
				printf(" locks held:\n");
			}
			n++;
			witness_list_lock(lock1);
		}
	if (PCPU_GET(spinlocks) != NULL) {
		/*
		 * Since we already hold a spinlock, preemption is
		 * already blocked.
		 */
		if (n == 0) {
			va_start(ap, fmt);
			vprintf(fmt, ap);
			va_end(ap);
			printf(" with the following");
			if (flags & WARN_SLEEPOK)
				printf(" non-sleepable");
			printf(" locks held:\n");
		}
		n += witness_list_locks(PCPU_PTR(spinlocks));
	}
	if (flags & WARN_PANIC && n)
		panic("witness_warn");
#ifdef DDB
	else if (witness_ddb && n)
		Debugger(__func__);
#endif
	return (n);
}

const char *
witness_file(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return ("?");
	w = lock->lo_witness;
	return (w->w_file);
}

int
witness_line(struct lock_object *lock)
{
	struct witness *w;

	if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
		return (0);
	w = lock->lo_witness;
	return (w->w_line);
}

static struct witness *
enroll(const char *description, struct lock_class *lock_class)
{
	struct witness *w;

	if (witness_watch == 0 || panicstr != NULL)
		return (NULL);
	if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
		return (NULL);
	mtx_lock_spin(&w_mtx);
	STAILQ_FOREACH(w, &w_all, w_list) {
		if (w->w_name == description || (w->w_refcount > 0 &&
		    strcmp(description, w->w_name) == 0)) {
			w->w_refcount++;
			mtx_unlock_spin(&w_mtx);
			if (lock_class != w->w_class)
				panic(
				"lock (%s) %s does not match earlier (%s) lock",
				    description, lock_class->lc_name,
				    w->w_class->lc_name);
			return (w);
		}
	}
	/*
	 * This isn't quite right, as witness_cold is still set while we
	 * enroll all the locks initialized before witness_initialize().
	 */
	if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
		mtx_unlock_spin(&w_mtx);
		panic("spin lock %s not in order list", description);
	}
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_name = description;
	w->w_class = lock_class;
	w->w_refcount = 1;
	STAILQ_INSERT_HEAD(&w_all, w, w_list);
	if (lock_class->lc_flags & LC_SPINLOCK)
		STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
	else if (lock_class->lc_flags & LC_SLEEPLOCK)
		STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
	else {
		mtx_unlock_spin(&w_mtx);
		panic("lock class %s is not sleep or spin",
		    lock_class->lc_name);
	}
	mtx_unlock_spin(&w_mtx);
	return (w);
}

/* Don't let the door bang you on the way out... */
static int
depart(struct witness *w)
{
	struct witness_child_list_entry *wcl, *nwcl;
	struct witness_list *list;
	struct witness *parent;

	MPASS(w->w_refcount == 0);
	if (w->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	/*
	 * First, we run through the entire tree looking for any
	 * witnesses that the outgoing witness is a child of.  For
	 * each parent that we find, we reparent all the direct
	 * children of the outgoing witness to its parent.
	 */
	STAILQ_FOREACH(parent, list, w_typelist) {
		if (!isitmychild(parent, w))
			continue;
		removechild(parent, w);
		if (!reparentchildren(parent, w))
			return (0);
	}

	/*
	 * Now we go through and free up the child list of the
	 * outgoing witness.
	 */
	for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
		nwcl = wcl->wcl_next;
		witness_child_free(wcl);
	}

	/*
	 * Detach from various lists and free.
	 */
	STAILQ_REMOVE(list, w, witness, w_typelist);
	STAILQ_REMOVE(&w_all, w, witness, w_list);
	witness_free(w);

	/* Finally, fixup the tree. */
	return (rebalancetree(list));
}

/*
 * Prune an entire lock order tree.  We look for cases where a lock
 * is now both a descendant and a direct child of a given lock.  In
 * that case, we want to remove the direct child link from the tree.
 *
 * Returns false if insertchild() fails.
 */
static int
rebalancetree(struct witness_list *list)
{
	struct witness *child, *parent;

	STAILQ_FOREACH(child, list, w_typelist) {
		STAILQ_FOREACH(parent, list, w_typelist) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			if (!insertchild(parent, child))
				return (0);
		}
	}
	witness_levelall();
	return (1);
}
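
/*
 * A small worked example of the pruning above (hypothetical witnesses):
 * given direct links A -> B, B -> C, and A -> C, the pair (A, C) is both
 * a direct child link and a descendant relationship via B.  The A -> C
 * link is removed, and since C is still a descendant of A through B, it
 * is not re-inserted.
 */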

/*
 * Add "child" as a direct child of "parent".  Returns false if
 * we fail due to out of memory.
 */
static int
insertchild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl;

	MPASS(child != NULL && parent != NULL);

	/*
	 * Find the first entry in "parent"'s child list with room and
	 * append "child" there, allocating a new entry if all are full.
	 */
	wcl = &parent->w_children;
	while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
		wcl = &(*wcl)->wcl_next;
	if (*wcl == NULL) {
		*wcl = witness_child_get();
		if (*wcl == NULL)
			return (0);
	}
	(*wcl)->wcl_children[(*wcl)->wcl_count++] = child;

	return (1);
}

/*
 * Make all the direct descendants of oldparent be direct descendants
 * of newparent.
 */
static int
reparentchildren(struct witness *newparent, struct witness *oldparent)
{
	struct witness_child_list_entry *wcl;
	int i;

	/* Avoid making a witness a child of itself. */
	MPASS(!isitmychild(oldparent, newparent));

	for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			if (!insertchild(newparent, wcl->wcl_children[i]))
				return (0);
	return (1);
}

static int
itismychild(struct witness *parent, struct witness *child)
{
	struct witness_list *list;

	MPASS(child != NULL && parent != NULL);
	if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
	    (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
		panic(
		"%s: parent (%s) and child (%s) are not the same lock type",
		    __func__, parent->w_class->lc_name,
		    child->w_class->lc_name);

	if (!insertchild(parent, child))
		return (0);

	if (parent->w_class->lc_flags & LC_SLEEPLOCK)
		list = &w_sleep;
	else
		list = &w_spin;
	return (rebalancetree(list));
}

static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry **wcl, *wcl1;
	int i;

	for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
		for (i = 0; i < (*wcl)->wcl_count; i++)
			if ((*wcl)->wcl_children[i] == child)
				goto found;
	return;
found:
	(*wcl)->wcl_count--;
	if ((*wcl)->wcl_count > i)
		(*wcl)->wcl_children[i] =
		    (*wcl)->wcl_children[(*wcl)->wcl_count];
	MPASS((*wcl)->wcl_children[i] != NULL);
	if ((*wcl)->wcl_count != 0)
		return;
	wcl1 = *wcl;
	*wcl = wcl1->wcl_next;
	witness_child_free(wcl1);
}

static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i;

	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		for (i = 0; i < wcl->wcl_count; i++) {
			if (wcl->wcl_children[i] == child)
				return (1);
		}
	}
	return (0);
}

static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness_child_list_entry *wcl;
	int i, j;

	if (isitmychild(parent, child))
		return (1);
	j = 0;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
		MPASS(j < 1000);
		for (i = 0; i < wcl->wcl_count; i++) {
			if (isitmydescendant(wcl->wcl_children[i], child))
				return (1);
		}
		j++;
	}
	return (0);
}

static void
witness_levelall(void)
{
	struct witness_list *list;
	struct witness *w, *w1;

	/*
	 * First clear all levels.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		w->w_level = 0;
	}

	/*
	 * Look for locks with no parent and level all their descendants.
	 */
	STAILQ_FOREACH(w, &w_all, w_list) {
		/*
		 * This is just an optimization, technically we could get
		 * away with just walking the all list each time.
		 */
		if (w->w_class->lc_flags & LC_SLEEPLOCK)
			list = &w_sleep;
		else
			list = &w_spin;
		STAILQ_FOREACH(w1, list, w_typelist) {
			if (isitmychild(w1, w))
				goto skip;
		}
		witness_leveldescendents(w, 0);
	skip:
		;	/* silence GCC 3.x */
	}
}

static void
witness_leveldescendents(struct witness *parent, int level)
{
	struct witness_child_list_entry *wcl;
	int i;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_leveldescendents(wcl->wcl_children[i], level);
}

static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
			   struct witness *parent, int indent)
{
	struct witness_child_list_entry *wcl;
	int i, level;

	level = parent->w_level;
	prnt("%-2d", level);
	for (i = 0; i < indent; i++)
		prnt(" ");
	if (parent->w_refcount > 0)
		prnt("%s", parent->w_name);
	else
		prnt("(dead)");
	if (parent->w_displayed) {
		prnt(" -- (already displayed)\n");
		return;
	}
	parent->w_displayed = 1;
	if (parent->w_refcount > 0) {
		if (parent->w_file != NULL)
			prnt(" -- last acquired @ %s:%d", parent->w_file,
			    parent->w_line);
	}
	prnt("\n");
	for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
		for (i = 0; i < wcl->wcl_count; i++)
			witness_displaydescendants(prnt,
			    wcl->wcl_children[i], indent + 1);
}
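
/*
 * Example of the resulting display (hypothetical tree and file names):
 *
 *	0 Giant -- last acquired @ kern/kern_synch.c:123
 *	1  process lock -- last acquired @ kern/kern_proc.c:456
 *	2   process group -- last acquired @ kern/kern_proc.c:789
 */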

#ifdef BLESSING
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_name, b->b_lock1) == 0) {
			if (strcmp(w2->w_name, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_name, b->b_lock2) == 0)
			if (strcmp(w2->w_name, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}
#endif
1455
1456static struct witness *
1457witness_get(void)
1458{
1459 struct witness *w;
1460
1461 if (witness_watch == 0) {
1462 mtx_unlock_spin(&w_mtx);
1463 return (NULL);
1464 }
1465 if (STAILQ_EMPTY(&w_free)) {
1466 witness_watch = 0;
1467 mtx_unlock_spin(&w_mtx);
1468 printf("%s: witness exhausted\n", __func__);
1469 return (NULL);
1470 }
1471 w = STAILQ_FIRST(&w_free);
1472 STAILQ_REMOVE_HEAD(&w_free, w_list);
1473 bzero(w, sizeof(*w));
1474 return (w);
1475}
1476
1477static void
1478witness_free(struct witness *w)
1479{
1480
1481 STAILQ_INSERT_HEAD(&w_free, w, w_list);
1482}
1483
1484static struct witness_child_list_entry *
1485witness_child_get(void)
1486{
1487 struct witness_child_list_entry *wcl;
1488
1489 if (witness_watch == 0) {
1490 mtx_unlock_spin(&w_mtx);
1491 return (NULL);
1492 }
1493 wcl = w_child_free;
1494 if (wcl == NULL) {
1495 witness_watch = 0;
1496 mtx_unlock_spin(&w_mtx);
1497 printf("%s: witness exhausted\n", __func__);
1498 return (NULL);
1499 }
1500 w_child_free = wcl->wcl_next;
1501 bzero(wcl, sizeof(*wcl));
1502 return (wcl);
1503}
1504
1505static void
1506witness_child_free(struct witness_child_list_entry *wcl)
1507{
1508
1509 wcl->wcl_next = w_child_free;
1510 w_child_free = wcl;
1511}
1512
1513static struct lock_list_entry *
1514witness_lock_list_get(void)
1515{
1516 struct lock_list_entry *lle;
1517
1518 if (witness_watch == 0)
1519 return (NULL);
1520 mtx_lock_spin(&w_mtx);
1521 lle = w_lock_list_free;
1522 if (lle == NULL) {
1523 witness_watch = 0;
1524 mtx_unlock_spin(&w_mtx);
1525 printf("%s: witness exhausted\n", __func__);
1526 return (NULL);
1527 }
1528 w_lock_list_free = lle->ll_next;
1529 mtx_unlock_spin(&w_mtx);
1530 bzero(lle, sizeof(*lle));
1531 return (lle);
1532}
1533
1534static void
1535witness_lock_list_free(struct lock_list_entry *lle)
1536{
1537
1538 mtx_lock_spin(&w_mtx);
1539 lle->ll_next = w_lock_list_free;
1540 w_lock_list_free = lle;
1541 mtx_unlock_spin(&w_mtx);
1542}
1543
1544static struct lock_instance *
1545find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1546{
1547 struct lock_list_entry *lle;
1548 struct lock_instance *instance;
1549 int i;
1550
1551 for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1552 for (i = lle->ll_count - 1; i >= 0; i--) {
1553 instance = &lle->ll_children[i];
1554 if (instance->li_lock == lock)
1555 return (instance);
1556 }
1557 return (NULL);
1558}
1559
1560static void
1561witness_list_lock(struct lock_instance *instance)
1562{
1563 struct lock_object *lock;
1564
1565 lock = instance->li_lock;
1566 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1567 "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
1568 if (lock->lo_type != lock->lo_name)
1569 printf(" (%s)", lock->lo_type);
1570 printf(" r = %d (%p) locked @ %s:%d\n",
1571 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1572 instance->li_line);
1573}
1574
1575int
1576witness_list_locks(struct lock_list_entry **lock_list)
1577{
1578 struct lock_list_entry *lle;
1579 int i, nheld;
1580
1581 nheld = 0;
1582 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1583 for (i = lle->ll_count - 1; i >= 0; i--) {
1584 witness_list_lock(&lle->ll_children[i]);
1585 nheld++;
1586 }
1587 return (nheld);
1588}
1589
1590void
1591witness_save(struct lock_object *lock, const char **filep, int *linep)
1592{
1593 struct lock_instance *instance;
1594
1595 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1596 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1597 return;
1598 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1599 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1600 lock->lo_class->lc_name, lock->lo_name);
1601 instance = find_instance(curthread->td_sleeplocks, lock);
1602 if (instance == NULL)
1603 panic("%s: lock (%s) %s not locked", __func__,
1604 lock->lo_class->lc_name, lock->lo_name);
1605 *filep = instance->li_file;
1606 *linep = instance->li_line;
1607}
1608
1609void
1610witness_restore(struct lock_object *lock, const char *file, int line)
1611{
1612 struct lock_instance *instance;
1613
1614 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1615 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1616 return;
1617 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1618 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1619 lock->lo_class->lc_name, lock->lo_name);
1620 instance = find_instance(curthread->td_sleeplocks, lock);
1621 if (instance == NULL)
1622 panic("%s: lock (%s) %s not locked", __func__,
1623 lock->lo_class->lc_name, lock->lo_name);
1624 lock->lo_witness->w_file = file;
1625 lock->lo_witness->w_line = line;
1626 instance->li_file = file;
1627 instance->li_line = line;
1628}
1629
1630void
1631witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1632{
1633#ifdef INVARIANT_SUPPORT
1634 struct lock_instance *instance;
1635
1636 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1637 return;
1638 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
1639 instance = find_instance(curthread->td_sleeplocks, lock);
1640 else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
1641 instance = find_instance(PCPU_GET(spinlocks), lock);
1642 else {
1643 panic("Lock (%s) %s is not sleep or spin!",
1644 lock->lo_class->lc_name, lock->lo_name);
1645 return;
1646 }
1647 file = fixup_filename(file);
1648 switch (flags) {
1649 case LA_UNLOCKED:
1650 if (instance != NULL)
1651 panic("Lock (%s) %s locked @ %s:%d.",
1652 lock->lo_class->lc_name, lock->lo_name, file, line);
1653 break;
1654 case LA_LOCKED:
1655 case LA_LOCKED | LA_RECURSED:
1656 case LA_LOCKED | LA_NOTRECURSED:
1657 case LA_SLOCKED:
1658 case LA_SLOCKED | LA_RECURSED:
1659 case LA_SLOCKED | LA_NOTRECURSED:
1660 case LA_XLOCKED:
1661 case LA_XLOCKED | LA_RECURSED:
1662 case LA_XLOCKED | LA_NOTRECURSED:
1663 if (instance == NULL) {
1664 panic("Lock (%s) %s not locked @ %s:%d.",
1665 lock->lo_class->lc_name, lock->lo_name, file, line);
1666 break;
1667 }
1668 if ((flags & LA_XLOCKED) != 0 &&
1669 (instance->li_flags & LI_EXCLUSIVE) == 0)
1670 panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1671 lock->lo_class->lc_name, lock->lo_name, file, line);
1672 if ((flags & LA_SLOCKED) != 0 &&
1673 (instance->li_flags & LI_EXCLUSIVE) != 0)
1674 panic("Lock (%s) %s exclusively locked @ %s:%d.",
1675 lock->lo_class->lc_name, lock->lo_name, file, line);
1676 if ((flags & LA_RECURSED) != 0 &&
1677 (instance->li_flags & LI_RECURSEMASK) == 0)
1678 panic("Lock (%s) %s not recursed @ %s:%d.",
1679 lock->lo_class->lc_name, lock->lo_name, file, line);
1680 if ((flags & LA_NOTRECURSED) != 0 &&
1681 (instance->li_flags & LI_RECURSEMASK) != 0)
1682 panic("Lock (%s) %s recursed @ %s:%d.",
1683 lock->lo_class->lc_name, lock->lo_name, file, line);
1684 break;
1685 default:
1686 panic("Invalid lock assertion at %s:%d.", file, line);
1687
1688 }
1689#endif /* INVARIANT_SUPPORT */
1690}
1691
1692#ifdef DDB
1693static void
1694witness_list(struct thread *td)
1695{
1696
1697 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1698 KASSERT(db_active, ("%s: not in the debugger", __func__));
1699
1700 if (witness_watch == 0)
1701 return;
1702
1703 witness_list_locks(&td->td_sleeplocks);
1704
1705 /*
1706 * We only handle spinlocks if td == curthread. This is somewhat broken
1707 * if td is currently executing on some other CPU and holds spin locks
1708	 * as we won't display those locks.  If we had an MI way of getting
1709	 * the per-CPU data for a given CPU then we could use
1710 * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
1711 * and "fix" this.
1712 *
1713 * That still wouldn't really fix this unless we locked sched_lock
1714 * or stopped the other CPU to make sure it wasn't changing the list
1715 * out from under us. It is probably best to just not try to handle
1716	 * threads on other CPUs for now.
1717 */
1718 if (td == curthread && PCPU_GET(spinlocks) != NULL)
1719 witness_list_locks(PCPU_PTR(spinlocks));
1720}
1721
1722DB_SHOW_COMMAND(locks, db_witness_list)
1723{
1724 struct thread *td;
1725 pid_t pid;
1726 struct proc *p;
1727
1728 if (have_addr) {
1729 pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
1730 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
1731 ((addr >> 16) % 16) * 10000;
1732 /* sx_slock(&allproc_lock); */
1733 FOREACH_PROC_IN_SYSTEM(p) {
1734 if (p->p_pid == pid)
1735 break;
1736 }
1737 /* sx_sunlock(&allproc_lock); */
1738 if (p == NULL) {
1739 db_printf("pid %d not found\n", pid);
1740 return;
1741 }
1742 FOREACH_THREAD_IN_PROC(p, td) {
1743 witness_list(td);
1744 }
1745 } else {
1746 td = curthread;
1747 witness_list(td);
1748 }
1749}
1750
1751DB_SHOW_COMMAND(witness, db_witness_display)
1752{
1753
1754 witness_display(db_printf);
1755}
1756#endif
304#ifdef __sparc64__
305 { "ipi", &lock_class_mtx_spin },
306#endif
307#endif
308 { "clk", &lock_class_mtx_spin },
309 { "mutex profiling lock", &lock_class_mtx_spin },
310 { "kse zombie lock", &lock_class_mtx_spin },
311 { "ALD Queue", &lock_class_mtx_spin },
312#ifdef __ia64__
313 { "MCA spin lock", &lock_class_mtx_spin },
314#endif
315#ifdef __i386__
316 { "pcicfg", &lock_class_mtx_spin },
317#endif
318 { NULL, NULL },
319 { NULL, NULL }
320};
321
322#ifdef BLESSING
323/*
324 * Pairs of locks which have been blessed.  Witness does not
325 * complain about lock order problems between blessed pairs.
326 */
327static struct witness_blessed blessed_list[] = {
328};
329static int blessed_count =
330 sizeof(blessed_list) / sizeof(struct witness_blessed);
331#endif
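
A populated list would look like the following sketch; the two lock names
here are invented for illustration, and each entry simply names one pair
whose order reversals blessed() below will ignore:

static struct witness_blessed blessed_list[] = {
	/* Hypothetical pair; the names are made up. */
	{ "firstlock", "secondlock" },
};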
332
333/*
334 * List of all locks in the system.
335 */
336TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
337
338static struct mtx all_mtx = {
339 { &lock_class_mtx_sleep, /* mtx_object.lo_class */
340 "All locks list", /* mtx_object.lo_name */
341 "All locks list", /* mtx_object.lo_type */
342 LO_INITIALIZED, /* mtx_object.lo_flags */
343 { NULL, NULL }, /* mtx_object.lo_list */
344 NULL }, /* mtx_object.lo_witness */
345 MTX_UNOWNED, 0, /* mtx_lock, mtx_recurse */
346 TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
347 { NULL, NULL } /* mtx_contested */
348};
349
350/*
351 * This global is set to 0 once it becomes safe to use the witness code.
352 */
353static int witness_cold = 1;
354
355/*
356 * Global variables for book keeping.
357 */
358static int lock_cur_cnt;
359static int lock_max_cnt;
360
361/*
362 * The WITNESS-enabled diagnostic code.
363 */
364static void
365witness_initialize(void *dummy __unused)
366{
367 struct lock_object *lock;
368 struct witness_order_list_entry *order;
369 struct witness *w, *w1;
370 int i;
371
372 /*
373 * We have to release Giant before initializing its witness
374 * structure so that WITNESS doesn't get confused.
375 */
376 mtx_unlock(&Giant);
377 mtx_assert(&Giant, MA_NOTOWNED);
378
379 CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
380 TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
381 mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
382 MTX_NOWITNESS);
383 for (i = 0; i < WITNESS_COUNT; i++)
384 witness_free(&w_data[i]);
385 for (i = 0; i < WITNESS_CHILDCOUNT; i++)
386 witness_child_free(&w_childdata[i]);
387 for (i = 0; i < LOCK_CHILDCOUNT; i++)
388 witness_lock_list_free(&w_locklistdata[i]);
389
390 /* First add in all the specified order lists. */
391 for (order = order_lists; order->w_name != NULL; order++) {
392 w = enroll(order->w_name, order->w_class);
393 if (w == NULL)
394 continue;
395 w->w_file = "order list";
396 for (order++; order->w_name != NULL; order++) {
397 w1 = enroll(order->w_name, order->w_class);
398 if (w1 == NULL)
399 continue;
400 w1->w_file = "order list";
401 if (!itismychild(w, w1))
402 panic("Not enough memory for static orders!");
403 w = w1;
404 }
405 }
406
407 /* Iterate through all locks and add them to witness. */
408 mtx_lock(&all_mtx);
409 TAILQ_FOREACH(lock, &all_locks, lo_list) {
410 if (lock->lo_flags & LO_WITNESS)
411 lock->lo_witness = enroll(lock->lo_type,
412 lock->lo_class);
413 else
414 lock->lo_witness = NULL;
415 }
416 mtx_unlock(&all_mtx);
417
418 /* Mark the witness code as being ready for use. */
419 atomic_store_rel_int(&witness_cold, 0);
420
421 mtx_lock(&Giant);
422}
423SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
424
425static int
426sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
427{
428 int error, value;
429
430 value = witness_watch;
431 error = sysctl_handle_int(oidp, &value, 0, req);
432 if (error != 0 || req->newptr == NULL)
433 return (error);
434 error = suser(req->td);
435 if (error != 0)
436 return (error);
437 if (value == witness_watch)
438 return (0);
439 if (value != 0)
440 return (EINVAL);
441 witness_watch = 0;
442 return (0);
443}
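
The handler above only accepts transitions to zero, so witness can be turned
off at runtime but never back on.  Assuming the handler is attached as the
debug.witness_watch OID (which the function name suggests), a minimal
userland sketch of disabling it:

#include <sys/types.h>
#include <sys/sysctl.h>

/* Disable witness at runtime (requires root); OID name assumed. */
static int
disable_witness(void)
{
	int zero = 0;

	return (sysctlbyname("debug.witness_watch", NULL, NULL,
	    &zero, sizeof(zero)));
}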
444
445void
446witness_init(struct lock_object *lock)
447{
448 struct lock_class *class;
449
450 class = lock->lo_class;
451 if (lock->lo_flags & LO_INITIALIZED)
452 panic("%s: lock (%s) %s is already initialized", __func__,
453 class->lc_name, lock->lo_name);
454 if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
455 (class->lc_flags & LC_RECURSABLE) == 0)
456 panic("%s: lock (%s) %s can not be recursable", __func__,
457 class->lc_name, lock->lo_name);
458 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
459 (class->lc_flags & LC_SLEEPABLE) == 0)
460 panic("%s: lock (%s) %s can not be sleepable", __func__,
461 class->lc_name, lock->lo_name);
462 if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
463 (class->lc_flags & LC_UPGRADABLE) == 0)
464 panic("%s: lock (%s) %s can not be upgradable", __func__,
465 class->lc_name, lock->lo_name);
466
467 mtx_lock(&all_mtx);
468 TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
469 lock->lo_flags |= LO_INITIALIZED;
470 lock_cur_cnt++;
471 if (lock_cur_cnt > lock_max_cnt)
472 lock_max_cnt = lock_cur_cnt;
473 mtx_unlock(&all_mtx);
474 if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
475 (lock->lo_flags & LO_WITNESS) != 0)
476 lock->lo_witness = enroll(lock->lo_type, class);
477 else
478 lock->lo_witness = NULL;
479}
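
Consumers do not call witness_init() directly; it runs from the lock
initialization paths.  A minimal sketch (lock name invented) of a mutex
whose embedded lock_object ends up enrolled this way:

static struct mtx example_mtx;

static void
example_attach(void)
{
	/*
	 * mtx_init() sets LO_WITNESS (absent MTX_NOWITNESS) and reaches
	 * witness_init(), which enrolls the lock under its type name.
	 */
	mtx_init(&example_mtx, "example lock", NULL, MTX_DEF);
}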
480
481void
482witness_destroy(struct lock_object *lock)
483{
484 struct witness *w;
485
486 if (witness_cold)
487 panic("lock (%s) %s destroyed while witness_cold",
488 lock->lo_class->lc_name, lock->lo_name);
489 if ((lock->lo_flags & LO_INITIALIZED) == 0)
490 panic("%s: lock (%s) %s is not initialized", __func__,
491 lock->lo_class->lc_name, lock->lo_name);
492
493 /* XXX: need to verify that no one holds the lock */
494 w = lock->lo_witness;
495 if (w != NULL) {
496 mtx_lock_spin(&w_mtx);
497 MPASS(w->w_refcount > 0);
498 w->w_refcount--;
499
500 /*
501 * Lock is already released if we have an allocation failure
502 * and depart() fails.
503 */
504 if (w->w_refcount != 0 || depart(w))
505 mtx_unlock_spin(&w_mtx);
506 }
507
508 mtx_lock(&all_mtx);
509 lock_cur_cnt--;
510 TAILQ_REMOVE(&all_locks, lock, lo_list);
511 lock->lo_flags &= ~LO_INITIALIZED;
512 mtx_unlock(&all_mtx);
513}
514
515#ifdef DDB
516static void
517witness_display_list(void(*prnt)(const char *fmt, ...),
518 struct witness_list *list)
519{
520 struct witness *w;
521
522 STAILQ_FOREACH(w, list, w_typelist) {
523 if (w->w_file == NULL || w->w_level > 0)
524 continue;
525 /*
526		 * This lock has no ancestors; display its descendants.
527 */
528 witness_displaydescendants(prnt, w, 0);
529 }
530}
531
532static void
533witness_display(void(*prnt)(const char *fmt, ...))
534{
535 struct witness *w;
536
537 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
538 witness_levelall();
539
540 /* Clear all the displayed flags. */
541 STAILQ_FOREACH(w, &w_all, w_list) {
542 w->w_displayed = 0;
543 }
544
545 /*
546 * First, handle sleep locks which have been acquired at least
547 * once.
548 */
549 prnt("Sleep locks:\n");
550 witness_display_list(prnt, &w_sleep);
551
552 /*
553 * Now do spin locks which have been acquired at least once.
554 */
555 prnt("\nSpin locks:\n");
556 witness_display_list(prnt, &w_spin);
557
558 /*
559 * Finally, any locks which have not been acquired yet.
560 */
561 prnt("\nLocks which were never acquired:\n");
562 STAILQ_FOREACH(w, &w_all, w_list) {
563 if (w->w_file != NULL || w->w_refcount == 0)
564 continue;
565 prnt("%s\n", w->w_name);
566 }
567}
568#endif /* DDB */
569
570/* Trim useless garbage from filenames. */
571static const char *
572fixup_filename(const char *file)
573{
574
575 if (file == NULL)
576 return (NULL);
577 while (strncmp(file, "../", 3) == 0)
578 file += 3;
579 return (file);
580}
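
For example, a call like

	fixup_filename("../../kern/kern_mutex.c")

returns a pointer to "kern/kern_mutex.c", with each leading "../"
component skipped.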
581
582void
583witness_lock(struct lock_object *lock, int flags, const char *file, int line)
584{
585 struct lock_list_entry **lock_list, *lle;
586 struct lock_instance *lock1, *lock2;
587 struct lock_class *class;
588 struct witness *w, *w1;
589 struct thread *td;
590 int i, j;
591#ifdef DDB
592 int go_into_ddb = 0;
593#endif
594
595 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
596 panicstr != NULL)
597 return;
598 w = lock->lo_witness;
599 class = lock->lo_class;
600 td = curthread;
601 file = fixup_filename(file);
602
603 if (class->lc_flags & LC_SLEEPLOCK) {
604 /*
605 * Since spin locks include a critical section, this check
606		 * implicitly enforces a lock order of all sleep locks before
607 * all spin locks.
608 */
609 if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
610 panic("blockable sleep lock (%s) %s @ %s:%d",
611 class->lc_name, lock->lo_name, file, line);
612 lock_list = &td->td_sleeplocks;
613 } else
614 lock_list = PCPU_PTR(spinlocks);
615
616 /*
617 * Is this the first lock acquired? If so, then no order checking
618 * is needed.
619 */
620 if (*lock_list == NULL)
621 goto out;
622
623 /*
624 * Check to see if we are recursing on a lock we already own.
625 */
626 lock1 = find_instance(*lock_list, lock);
627 if (lock1 != NULL) {
628 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
629 (flags & LOP_EXCLUSIVE) == 0) {
630 printf("shared lock of (%s) %s @ %s:%d\n",
631 class->lc_name, lock->lo_name, file, line);
632 printf("while exclusively locked from %s:%d\n",
633 lock1->li_file, lock1->li_line);
634 panic("share->excl");
635 }
636 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
637 (flags & LOP_EXCLUSIVE) != 0) {
638 printf("exclusive lock of (%s) %s @ %s:%d\n",
639 class->lc_name, lock->lo_name, file, line);
640 printf("while share locked from %s:%d\n",
641 lock1->li_file, lock1->li_line);
642 panic("excl->share");
643 }
644 lock1->li_flags++;
645 if ((lock->lo_flags & LO_RECURSABLE) == 0) {
646 printf(
647 "recursed on non-recursive lock (%s) %s @ %s:%d\n",
648 class->lc_name, lock->lo_name, file, line);
649 printf("first acquired @ %s:%d\n", lock1->li_file,
650 lock1->li_line);
651 panic("recurse");
652 }
653 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
654 td->td_proc->p_pid, lock->lo_name,
655 lock1->li_flags & LI_RECURSEMASK);
656 lock1->li_file = file;
657 lock1->li_line = line;
658 return;
659 }
660
661 /*
662	 * Try locks do not block if they fail to acquire the lock, so
663	 * there is no danger of deadlock or of switching while holding a
664 * spin lock if we acquire a lock via a try operation.
665 */
666 if (flags & LOP_TRYLOCK)
667 goto out;
668
669 /*
670	 * Check for duplicate locks of the same type.  Note that we only
671	 * have to check this against the most recently acquired lock.  Any
672 * other cases will be caught as lock order violations.
673 */
674 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
675 w1 = lock1->li_lock->lo_witness;
676 if (w1 == w) {
677 if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
678 goto out;
679 w->w_same_squawked = 1;
680 printf("acquiring duplicate lock of same type: \"%s\"\n",
681 lock->lo_type);
682 printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
683 lock1->li_file, lock1->li_line);
684 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
685#ifdef DDB
686 go_into_ddb = 1;
687#endif
688 goto out;
689 }
690 MPASS(!mtx_owned(&w_mtx));
691 mtx_lock_spin(&w_mtx);
692 /*
693	 * If the lock we are acquiring has a known higher level, it is ok.
694 */
695 if (witness_watch > 1 && w->w_level > w1->w_level) {
696 mtx_unlock_spin(&w_mtx);
697 goto out;
698 }
699 /*
700	 * If we know that the lock we are acquiring comes after
701 * the lock we most recently acquired in the lock order tree,
702 * then there is no need for any further checks.
703 */
704 if (isitmydescendant(w1, w)) {
705 mtx_unlock_spin(&w_mtx);
706 goto out;
707 }
708 for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
709 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
710
711 MPASS(j < WITNESS_COUNT);
712 lock1 = &lle->ll_children[i];
713 w1 = lock1->li_lock->lo_witness;
714
715 /*
716 * If this lock doesn't undergo witness checking,
717 * then skip it.
718 */
719 if (w1 == NULL) {
720 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
721 ("lock missing witness structure"));
722 continue;
723 }
724 /*
725 * If we are locking Giant and this is a sleepable
726 * lock, then skip it.
727 */
728 if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
729 lock == &Giant.mtx_object)
730 continue;
731 /*
732 * If we are locking a sleepable lock and this lock
733 * is Giant, then skip it.
734 */
735 if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
736 lock1->li_lock == &Giant.mtx_object)
737 continue;
738 /*
739 * If we are locking a sleepable lock and this lock
740 * isn't sleepable, we want to treat it as a lock
741			 * order violation to enforce a general lock order of
742 * sleepable locks before non-sleepable locks.
743 */
744 if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
745 (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
746 /*
747			 * Check the lock order hierarchy for a reversal.
748 */
749 if (!isitmydescendant(w, w1))
750 continue;
751 /*
752 * We have a lock order violation, check to see if it
753 * is allowed or has already been yelled about.
754 */
755 mtx_unlock_spin(&w_mtx);
756#ifdef BLESSING
757 if (blessed(w, w1))
758 goto out;
759#endif
760 if (lock1->li_lock == &Giant.mtx_object) {
761 if (w1->w_Giant_squawked)
762 goto out;
763 else
764 w1->w_Giant_squawked = 1;
765 } else {
766 if (w1->w_other_squawked)
767 goto out;
768 else
769 w1->w_other_squawked = 1;
770 }
771 /*
772 * Ok, yell about it.
773 */
774 printf("lock order reversal\n");
775 /*
776 * Try to locate an earlier lock with
777 * witness w in our list.
778 */
779 do {
780 lock2 = &lle->ll_children[i];
781 MPASS(lock2->li_lock != NULL);
782 if (lock2->li_lock->lo_witness == w)
783 break;
784 i--;
785				if (i < 0 && lle->ll_next != NULL) {
786 lle = lle->ll_next;
787 i = lle->ll_count - 1;
788 MPASS(i >= 0 && i < LOCK_NCHILDREN);
789 }
790 } while (i >= 0);
791 if (i < 0) {
792 printf(" 1st %p %s (%s) @ %s:%d\n",
793 lock1->li_lock, lock1->li_lock->lo_name,
794 lock1->li_lock->lo_type, lock1->li_file,
795 lock1->li_line);
796 printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
797 lock->lo_name, lock->lo_type, file, line);
798 } else {
799 printf(" 1st %p %s (%s) @ %s:%d\n",
800 lock2->li_lock, lock2->li_lock->lo_name,
801 lock2->li_lock->lo_type, lock2->li_file,
802 lock2->li_line);
803 printf(" 2nd %p %s (%s) @ %s:%d\n",
804 lock1->li_lock, lock1->li_lock->lo_name,
805 lock1->li_lock->lo_type, lock1->li_file,
806 lock1->li_line);
807 printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
808 lock->lo_name, lock->lo_type, file, line);
809 }
810#ifdef DDB
811 go_into_ddb = 1;
812#endif
813 goto out;
814 }
815 }
816 lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
817 /*
818 * Don't build a new relationship between a sleepable lock and
819 * Giant if it is the wrong direction. The real lock order is that
820 * sleepable locks come before Giant.
821 */
822 if (!(lock1->li_lock == &Giant.mtx_object &&
823 (lock->lo_flags & LO_SLEEPABLE) != 0)) {
824 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
825 lock->lo_type, lock1->li_lock->lo_type);
826 if (!itismychild(lock1->li_lock->lo_witness, w))
827 /* Witness is dead. */
828 return;
829 }
830 mtx_unlock_spin(&w_mtx);
831
832out:
833#ifdef DDB
834 if (go_into_ddb) {
835 if (witness_trace)
836 backtrace();
837 if (witness_ddb)
838 Debugger(__func__);
839 }
840#endif
841 w->w_file = file;
842 w->w_line = line;
843
844 lle = *lock_list;
845 if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
846 lle = witness_lock_list_get();
847 if (lle == NULL)
848 return;
849 lle->ll_next = *lock_list;
850 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
851 td->td_proc->p_pid, lle);
852 *lock_list = lle;
853 }
854 lock1 = &lle->ll_children[lle->ll_count++];
855 lock1->li_lock = lock;
856 lock1->li_line = line;
857 lock1->li_file = file;
858 if ((flags & LOP_EXCLUSIVE) != 0)
859 lock1->li_flags = LI_EXCLUSIVE;
860 else
861 lock1->li_flags = 0;
862 CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
863 td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
864}
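
To make the checks above concrete, here is a minimal sketch (locks
invented, assumed initialized elsewhere) of the kind of bug witness_lock()
exists to catch: two sleep mutexes taken in opposite orders on different
paths.  The first path records that mtx_a comes before mtx_b in the order
tree; the second then fails the isitmydescendant() check and produces the
"lock order reversal" report.

static struct mtx mtx_a, mtx_b;		/* assume both mtx_init()ed */

static void
path_one(void)
{
	mtx_lock(&mtx_a);
	mtx_lock(&mtx_b);		/* establishes order: a before b */
	mtx_unlock(&mtx_b);
	mtx_unlock(&mtx_a);
}

static void
path_two(void)
{
	mtx_lock(&mtx_b);
	mtx_lock(&mtx_a);		/* reversal: witness complains here */
	mtx_unlock(&mtx_a);
	mtx_unlock(&mtx_b);
}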
865
866void
867witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
868{
869 struct lock_instance *instance;
870 struct lock_class *class;
871
872 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
873 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
874 return;
875 class = lock->lo_class;
876 file = fixup_filename(file);
877 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
878 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
879 class->lc_name, lock->lo_name, file, line);
880 if ((flags & LOP_TRYLOCK) == 0)
881 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
882 lock->lo_name, file, line);
883 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
884 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
885 class->lc_name, lock->lo_name, file, line);
886 instance = find_instance(curthread->td_sleeplocks, lock);
887 if (instance == NULL)
888 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
889 class->lc_name, lock->lo_name, file, line);
890 if ((instance->li_flags & LI_EXCLUSIVE) != 0)
891 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
892 class->lc_name, lock->lo_name, file, line);
893 if ((instance->li_flags & LI_RECURSEMASK) != 0)
894 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
895 class->lc_name, lock->lo_name,
896 instance->li_flags & LI_RECURSEMASK, file, line);
897 instance->li_flags |= LI_EXCLUSIVE;
898}
899
900void
901witness_downgrade(struct lock_object *lock, int flags, const char *file,
902 int line)
903{
904 struct lock_instance *instance;
905 struct lock_class *class;
906
907 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
908 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
909 return;
910 class = lock->lo_class;
911 file = fixup_filename(file);
912 if ((lock->lo_flags & LO_UPGRADABLE) == 0)
913 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
914 class->lc_name, lock->lo_name, file, line);
915 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
916 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
917 class->lc_name, lock->lo_name, file, line);
918 instance = find_instance(curthread->td_sleeplocks, lock);
919 if (instance == NULL)
920 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
921 class->lc_name, lock->lo_name, file, line);
922 if ((instance->li_flags & LI_EXCLUSIVE) == 0)
923 panic("downgrade of shared lock (%s) %s @ %s:%d",
924 class->lc_name, lock->lo_name, file, line);
925 if ((instance->li_flags & LI_RECURSEMASK) != 0)
926 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
927 class->lc_name, lock->lo_name,
928 instance->li_flags & LI_RECURSEMASK, file, line);
929 instance->li_flags &= ~LI_EXCLUSIVE;
930}
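
A sketch of the sx(9) usage that exercises these two hooks: a successful
try-upgrade from shared to exclusive reaches witness_upgrade(), and the
later downgrade reaches witness_downgrade() (lock assumed initialized with
sx_init()):

static struct sx example_sx;

static void
example_rw(void)
{
	sx_slock(&example_sx);
	if (sx_try_upgrade(&example_sx)) {	/* -> witness_upgrade() */
		/* ... modify protected state ... */
		sx_downgrade(&example_sx);	/* -> witness_downgrade() */
	}
	sx_sunlock(&example_sx);
}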
931
932void
933witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
934{
935 struct lock_list_entry **lock_list, *lle;
936 struct lock_instance *instance;
937 struct lock_class *class;
938 struct thread *td;
939 register_t s;
940 int i, j;
941
942 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
943 panicstr != NULL)
944 return;
945 td = curthread;
946 class = lock->lo_class;
947 file = fixup_filename(file);
948 if (class->lc_flags & LC_SLEEPLOCK)
949 lock_list = &td->td_sleeplocks;
950 else
951 lock_list = PCPU_PTR(spinlocks);
952 for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
953 for (i = 0; i < (*lock_list)->ll_count; i++) {
954 instance = &(*lock_list)->ll_children[i];
955 if (instance->li_lock == lock) {
956 if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
957 (flags & LOP_EXCLUSIVE) == 0) {
958 printf(
959 "shared unlock of (%s) %s @ %s:%d\n",
960 class->lc_name, lock->lo_name,
961 file, line);
962 printf(
963 "while exclusively locked from %s:%d\n",
964 instance->li_file,
965 instance->li_line);
966 panic("excl->ushare");
967 }
968 if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
969 (flags & LOP_EXCLUSIVE) != 0) {
970 printf(
971 "exclusive unlock of (%s) %s @ %s:%d\n",
972 class->lc_name, lock->lo_name,
973 file, line);
974 printf(
975 "while share locked from %s:%d\n",
976 instance->li_file,
977 instance->li_line);
978 panic("share->uexcl");
979 }
980 /* If we are recursed, unrecurse. */
981 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
982 CTR4(KTR_WITNESS,
983 "%s: pid %d unrecursed on %s r=%d", __func__,
984 td->td_proc->p_pid,
985 instance->li_lock->lo_name,
986 instance->li_flags);
987 instance->li_flags--;
988 return;
989 }
990 s = intr_disable();
991 CTR4(KTR_WITNESS,
992 "%s: pid %d removed %s from lle[%d]", __func__,
993 td->td_proc->p_pid,
994 instance->li_lock->lo_name,
995 (*lock_list)->ll_count - 1);
996 for (j = i; j < (*lock_list)->ll_count - 1; j++)
997 (*lock_list)->ll_children[j] =
998 (*lock_list)->ll_children[j + 1];
999 (*lock_list)->ll_count--;
1000 intr_restore(s);
1001 if ((*lock_list)->ll_count == 0) {
1002 lle = *lock_list;
1003 *lock_list = lle->ll_next;
1004 CTR3(KTR_WITNESS,
1005 "%s: pid %d removed lle %p", __func__,
1006 td->td_proc->p_pid, lle);
1007 witness_lock_list_free(lle);
1008 }
1009 return;
1010 }
1011 }
1012 panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
1013 file, line);
1014}
1015
1016/*
1017 * Warn if any locks other than 'lock' are held. Flags can be passed in to
1018 * exempt Giant and sleepable locks from the checks as well. If any
1019 * non-exempt locks are held, then a supplied message is printed to the
1020 * console along with a list of the offending locks. If indicated in the
1021 * flags then a failure results in a panic as well.
1022 */
1023int
1024witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
1025{
1026 struct lock_list_entry *lle;
1027 struct lock_instance *lock1;
1028 struct thread *td;
1029 va_list ap;
1030 int i, n;
1031
1032 if (witness_cold || witness_watch == 0 || panicstr != NULL)
1033 return (0);
1034 n = 0;
1035 td = curthread;
1036 for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
1037 for (i = lle->ll_count - 1; i >= 0; i--) {
1038 lock1 = &lle->ll_children[i];
1039 if (lock1->li_lock == lock)
1040 continue;
1041 if (flags & WARN_GIANTOK &&
1042 lock1->li_lock == &Giant.mtx_object)
1043 continue;
1044 if (flags & WARN_SLEEPOK &&
1045 (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
1046 continue;
1047 if (n == 0) {
1048 va_start(ap, fmt);
1049 vprintf(fmt, ap);
1050 va_end(ap);
1051 printf(" with the following");
1052 if (flags & WARN_SLEEPOK)
1053 printf(" non-sleepable");
1054				printf(" locks held:\n");
1055 }
1056 n++;
1057 witness_list_lock(lock1);
1058 }
1059 if (PCPU_GET(spinlocks) != NULL) {
1060 /*
1061		 * Since we already hold a spin lock, preemption is
1062		 * blocked.
1063 */
1064 if (n == 0) {
1065 va_start(ap, fmt);
1066 vprintf(fmt, ap);
1067 va_end(ap);
1068 printf(" with the following");
1069 if (flags & WARN_SLEEPOK)
1070 printf(" non-sleepable");
1071			printf(" locks held:\n");
1072 }
1073 n += witness_list_locks(PCPU_PTR(spinlocks));
1074 }
1075 if (flags & WARN_PANIC && n)
1076 panic("witness_warn");
1077#ifdef DDB
1078 else if (witness_ddb && n)
1079 Debugger(__func__);
1080#endif
1081 return (n);
1082}
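
A typical caller is a sleep path: before blocking, warn about anything
still held beyond Giant, sleepable locks, and the interlock being released
as part of the sleep.  A minimal sketch:

static void
example_before_sleep(struct mtx *interlock, const char *wmesg)
{
	/*
	 * Giant and sleepable locks are exempted by the flags; the
	 * interlock is skipped because it is passed as 'lock'.
	 */
	witness_warn(WARN_GIANTOK | WARN_SLEEPOK, &interlock->mtx_object,
	    "sleeping on \"%s\"", wmesg);
}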
1083
1084const char *
1085witness_file(struct lock_object *lock)
1086{
1087 struct witness *w;
1088
1089 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1090 return ("?");
1091 w = lock->lo_witness;
1092 return (w->w_file);
1093}
1094
1095int
1096witness_line(struct lock_object *lock)
1097{
1098 struct witness *w;
1099
1100 if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
1101 return (0);
1102 w = lock->lo_witness;
1103 return (w->w_line);
1104}
1105
1106static struct witness *
1107enroll(const char *description, struct lock_class *lock_class)
1108{
1109 struct witness *w;
1110
1111	if (witness_watch == 0 || panicstr != NULL)
1112 return (NULL);
1113 if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
1114 return (NULL);
1115 mtx_lock_spin(&w_mtx);
1116 STAILQ_FOREACH(w, &w_all, w_list) {
1117 if (w->w_name == description || (w->w_refcount > 0 &&
1118 strcmp(description, w->w_name) == 0)) {
1119 w->w_refcount++;
1120 mtx_unlock_spin(&w_mtx);
1121 if (lock_class != w->w_class)
1122 panic(
1123 "lock (%s) %s does not match earlier (%s) lock",
1124 description, lock_class->lc_name,
1125 w->w_class->lc_name);
1126 return (w);
1127 }
1128 }
1129 /*
1130	 * This isn't quite right, as witness_cold is still set while we
1131 * enroll all the locks initialized before witness_initialize().
1132 */
1133 if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
1134 mtx_unlock_spin(&w_mtx);
1135 panic("spin lock %s not in order list", description);
1136 }
1137 if ((w = witness_get()) == NULL)
1138 return (NULL);
1139 w->w_name = description;
1140 w->w_class = lock_class;
1141 w->w_refcount = 1;
1142 STAILQ_INSERT_HEAD(&w_all, w, w_list);
1143 if (lock_class->lc_flags & LC_SPINLOCK)
1144 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
1145 else if (lock_class->lc_flags & LC_SLEEPLOCK)
1146 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
1147 else {
1148 mtx_unlock_spin(&w_mtx);
1149 panic("lock class %s is not sleep or spin",
1150 lock_class->lc_name);
1151 }
1152 mtx_unlock_spin(&w_mtx);
1153 return (w);
1154}
1155
1156/* Don't let the door bang you on the way out... */
1157static int
1158depart(struct witness *w)
1159{
1160 struct witness_child_list_entry *wcl, *nwcl;
1161 struct witness_list *list;
1162 struct witness *parent;
1163
1164 MPASS(w->w_refcount == 0);
1165 if (w->w_class->lc_flags & LC_SLEEPLOCK)
1166 list = &w_sleep;
1167 else
1168 list = &w_spin;
1169 /*
1170 * First, we run through the entire tree looking for any
1171 * witnesses that the outgoing witness is a child of. For
1172 * each parent that we find, we reparent all the direct
1173 * children of the outgoing witness to its parent.
1174 */
1175 STAILQ_FOREACH(parent, list, w_typelist) {
1176 if (!isitmychild(parent, w))
1177 continue;
1178 removechild(parent, w);
1179 if (!reparentchildren(parent, w))
1180 return (0);
1181 }
1182
1183 /*
1184 * Now we go through and free up the child list of the
1185 * outgoing witness.
1186 */
1187 for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
1188 nwcl = wcl->wcl_next;
1189 witness_child_free(wcl);
1190 }
1191
1192 /*
1193 * Detach from various lists and free.
1194 */
1195 STAILQ_REMOVE(list, w, witness, w_typelist);
1196 STAILQ_REMOVE(&w_all, w, witness, w_list);
1197 witness_free(w);
1198
1199 /* Finally, fixup the tree. */
1200 return (rebalancetree(list));
1201}
1202
1203/*
1204 * Prune an entire lock order tree. We look for cases where a lock
1205 * is now both a descendant and a direct child of a given lock. In
1206 * that case, we want to remove the direct child link from the tree.
1207 *
1208 * Returns false if insertchild() fails.
1209 */
1210static int
1211rebalancetree(struct witness_list *list)
1212{
1213 struct witness *child, *parent;
1214
1215 STAILQ_FOREACH(child, list, w_typelist) {
1216 STAILQ_FOREACH(parent, list, w_typelist) {
1217 if (!isitmychild(parent, child))
1218 continue;
1219 removechild(parent, child);
1220 if (isitmydescendant(parent, child))
1221 continue;
1222 if (!insertchild(parent, child))
1223 return (0);
1224 }
1225 }
1226 witness_levelall();
1227 return (1);
1228}
1229
1230/*
1231 * Add "child" as a direct child of "parent". Returns false if
1232 * we fail due to out of memory.
1233 */
1234static int
1235insertchild(struct witness *parent, struct witness *child)
1236{
1237 struct witness_child_list_entry **wcl;
1238
1239 MPASS(child != NULL && parent != NULL);
1240
1241 /*
1242 * Insert "child" after "parent"
1243 */
1244 wcl = &parent->w_children;
1245 while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
1246 wcl = &(*wcl)->wcl_next;
1247 if (*wcl == NULL) {
1248 *wcl = witness_child_get();
1249 if (*wcl == NULL)
1250 return (0);
1251 }
1252 (*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
1253
1254 return (1);
1255}
1256
1257/*
1258 * Make all the direct descendants of oldparent be direct descendants
1259 * of newparent.
1260 */
1261static int
1262reparentchildren(struct witness *newparent, struct witness *oldparent)
1263{
1264 struct witness_child_list_entry *wcl;
1265 int i;
1266
1267 /* Avoid making a witness a child of itself. */
1268 MPASS(!isitmychild(oldparent, newparent));
1269
1270 for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1271 for (i = 0; i < wcl->wcl_count; i++)
1272 if (!insertchild(newparent, wcl->wcl_children[i]))
1273 return (0);
1274 return (1);
1275}
1276
1277static int
1278itismychild(struct witness *parent, struct witness *child)
1279{
1280 struct witness_list *list;
1281
1282 MPASS(child != NULL && parent != NULL);
1283 if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
1284 (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
1285 panic(
1286 "%s: parent (%s) and child (%s) are not the same lock type",
1287 __func__, parent->w_class->lc_name,
1288 child->w_class->lc_name);
1289
1290 if (!insertchild(parent, child))
1291 return (0);
1292
1293 if (parent->w_class->lc_flags & LC_SLEEPLOCK)
1294 list = &w_sleep;
1295 else
1296 list = &w_spin;
1297 return (rebalancetree(list));
1298}
1299
1300static void
1301removechild(struct witness *parent, struct witness *child)
1302{
1303 struct witness_child_list_entry **wcl, *wcl1;
1304 int i;
1305
1306 for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
1307 for (i = 0; i < (*wcl)->wcl_count; i++)
1308 if ((*wcl)->wcl_children[i] == child)
1309 goto found;
1310 return;
1311found:
1312 (*wcl)->wcl_count--;
1313 if ((*wcl)->wcl_count > i)
1314 (*wcl)->wcl_children[i] =
1315 (*wcl)->wcl_children[(*wcl)->wcl_count];
1316 MPASS((*wcl)->wcl_children[i] != NULL);
1317 if ((*wcl)->wcl_count != 0)
1318 return;
1319 wcl1 = *wcl;
1320 *wcl = wcl1->wcl_next;
1321 witness_child_free(wcl1);
1322}
1323
1324static int
1325isitmychild(struct witness *parent, struct witness *child)
1326{
1327 struct witness_child_list_entry *wcl;
1328 int i;
1329
1330 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1331 for (i = 0; i < wcl->wcl_count; i++) {
1332 if (wcl->wcl_children[i] == child)
1333 return (1);
1334 }
1335 }
1336 return (0);
1337}
1338
1339static int
1340isitmydescendant(struct witness *parent, struct witness *child)
1341{
1342 struct witness_child_list_entry *wcl;
1343 int i, j;
1344
1345 if (isitmychild(parent, child))
1346 return (1);
1347 j = 0;
1348 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
1349 MPASS(j < 1000);
1350 for (i = 0; i < wcl->wcl_count; i++) {
1351 if (isitmydescendant(wcl->wcl_children[i], child))
1352 return (1);
1353 }
1354 j++;
1355 }
1356 return (0);
1357}
1358
1359static void
1360witness_levelall(void)
1361{
1362 struct witness_list *list;
1363 struct witness *w, *w1;
1364
1365 /*
1366 * First clear all levels.
1367 */
1368 STAILQ_FOREACH(w, &w_all, w_list) {
1369 w->w_level = 0;
1370 }
1371
1372 /*
1373 * Look for locks with no parent and level all their descendants.
1374 */
1375 STAILQ_FOREACH(w, &w_all, w_list) {
1376 /*
1377		 * This is just an optimization; technically we could get
1378		 * away with just walking the all list each time.
1379 */
1380 if (w->w_class->lc_flags & LC_SLEEPLOCK)
1381 list = &w_sleep;
1382 else
1383 list = &w_spin;
1384 STAILQ_FOREACH(w1, list, w_typelist) {
1385 if (isitmychild(w1, w))
1386 goto skip;
1387 }
1388 witness_leveldescendents(w, 0);
1389 skip:
1390 ; /* silence GCC 3.x */
1391 }
1392}
1393
1394static void
1395witness_leveldescendents(struct witness *parent, int level)
1396{
1397 struct witness_child_list_entry *wcl;
1398 int i;
1399
1400 if (parent->w_level < level)
1401 parent->w_level = level;
1402 level++;
1403 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1404 for (i = 0; i < wcl->wcl_count; i++)
1405 witness_leveldescendents(wcl->wcl_children[i], level);
1406}
1407
1408static void
1409witness_displaydescendants(void(*prnt)(const char *fmt, ...),
1410 struct witness *parent, int indent)
1411{
1412 struct witness_child_list_entry *wcl;
1413 int i, level;
1414
1415 level = parent->w_level;
1416 prnt("%-2d", level);
1417 for (i = 0; i < indent; i++)
1418 prnt(" ");
1419 if (parent->w_refcount > 0)
1420 prnt("%s", parent->w_name);
1421 else
1422 prnt("(dead)");
1423 if (parent->w_displayed) {
1424 prnt(" -- (already displayed)\n");
1425 return;
1426 }
1427 parent->w_displayed = 1;
1428 if (parent->w_refcount > 0) {
1429 if (parent->w_file != NULL)
1430 prnt(" -- last acquired @ %s:%d", parent->w_file,
1431 parent->w_line);
1432 }
1433 prnt("\n");
1434 for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
1435 for (i = 0; i < wcl->wcl_count; i++)
1436 witness_displaydescendants(prnt,
1437 wcl->wcl_children[i], indent + 1);
1438}
1439
1440#ifdef BLESSING
1441static int
1442blessed(struct witness *w1, struct witness *w2)
1443{
1444 int i;
1445 struct witness_blessed *b;
1446
1447 for (i = 0; i < blessed_count; i++) {
1448 b = &blessed_list[i];
1449 if (strcmp(w1->w_name, b->b_lock1) == 0) {
1450 if (strcmp(w2->w_name, b->b_lock2) == 0)
1451 return (1);
1452 continue;
1453 }
1454 if (strcmp(w1->w_name, b->b_lock2) == 0)
1455 if (strcmp(w2->w_name, b->b_lock1) == 0)
1456 return (1);
1457 }
1458 return (0);
1459}
1460#endif
1461
1462static struct witness *
1463witness_get(void)
1464{
1465 struct witness *w;
1466
1467 if (witness_watch == 0) {
1468 mtx_unlock_spin(&w_mtx);
1469 return (NULL);
1470 }
1471 if (STAILQ_EMPTY(&w_free)) {
1472 witness_watch = 0;
1473 mtx_unlock_spin(&w_mtx);
1474 printf("%s: witness exhausted\n", __func__);
1475 return (NULL);
1476 }
1477 w = STAILQ_FIRST(&w_free);
1478 STAILQ_REMOVE_HEAD(&w_free, w_list);
1479 bzero(w, sizeof(*w));
1480 return (w);
1481}
1482
1483static void
1484witness_free(struct witness *w)
1485{
1486
1487 STAILQ_INSERT_HEAD(&w_free, w, w_list);
1488}
1489
1490static struct witness_child_list_entry *
1491witness_child_get(void)
1492{
1493 struct witness_child_list_entry *wcl;
1494
1495 if (witness_watch == 0) {
1496 mtx_unlock_spin(&w_mtx);
1497 return (NULL);
1498 }
1499 wcl = w_child_free;
1500 if (wcl == NULL) {
1501 witness_watch = 0;
1502 mtx_unlock_spin(&w_mtx);
1503 printf("%s: witness exhausted\n", __func__);
1504 return (NULL);
1505 }
1506 w_child_free = wcl->wcl_next;
1507 bzero(wcl, sizeof(*wcl));
1508 return (wcl);
1509}
1510
1511static void
1512witness_child_free(struct witness_child_list_entry *wcl)
1513{
1514
1515 wcl->wcl_next = w_child_free;
1516 w_child_free = wcl;
1517}
1518
1519static struct lock_list_entry *
1520witness_lock_list_get(void)
1521{
1522 struct lock_list_entry *lle;
1523
1524 if (witness_watch == 0)
1525 return (NULL);
1526 mtx_lock_spin(&w_mtx);
1527 lle = w_lock_list_free;
1528 if (lle == NULL) {
1529 witness_watch = 0;
1530 mtx_unlock_spin(&w_mtx);
1531 printf("%s: witness exhausted\n", __func__);
1532 return (NULL);
1533 }
1534 w_lock_list_free = lle->ll_next;
1535 mtx_unlock_spin(&w_mtx);
1536 bzero(lle, sizeof(*lle));
1537 return (lle);
1538}
1539
1540static void
1541witness_lock_list_free(struct lock_list_entry *lle)
1542{
1543
1544 mtx_lock_spin(&w_mtx);
1545 lle->ll_next = w_lock_list_free;
1546 w_lock_list_free = lle;
1547 mtx_unlock_spin(&w_mtx);
1548}
1549
1550static struct lock_instance *
1551find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
1552{
1553 struct lock_list_entry *lle;
1554 struct lock_instance *instance;
1555 int i;
1556
1557 for (lle = lock_list; lle != NULL; lle = lle->ll_next)
1558 for (i = lle->ll_count - 1; i >= 0; i--) {
1559 instance = &lle->ll_children[i];
1560 if (instance->li_lock == lock)
1561 return (instance);
1562 }
1563 return (NULL);
1564}
1565
1566static void
1567witness_list_lock(struct lock_instance *instance)
1568{
1569 struct lock_object *lock;
1570
1571 lock = instance->li_lock;
1572 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
1573 "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
1574 if (lock->lo_type != lock->lo_name)
1575 printf(" (%s)", lock->lo_type);
1576 printf(" r = %d (%p) locked @ %s:%d\n",
1577 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
1578 instance->li_line);
1579}
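
With illustrative values, the format above yields one line per held lock,
e.g.:

	exclusive sleep mutex Giant r = 0 (0xc1234567) locked @ kern/kern_synch.c:123

The parenthesized type appears only when lo_type differs from lo_name.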
1580
1581int
1582witness_list_locks(struct lock_list_entry **lock_list)
1583{
1584 struct lock_list_entry *lle;
1585 int i, nheld;
1586
1587 nheld = 0;
1588 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
1589 for (i = lle->ll_count - 1; i >= 0; i--) {
1590 witness_list_lock(&lle->ll_children[i]);
1591 nheld++;
1592 }
1593 return (nheld);
1594}
1595
1596void
1597witness_save(struct lock_object *lock, const char **filep, int *linep)
1598{
1599 struct lock_instance *instance;
1600
1601 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1602 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1603 return;
1604 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1605 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1606 lock->lo_class->lc_name, lock->lo_name);
1607 instance = find_instance(curthread->td_sleeplocks, lock);
1608 if (instance == NULL)
1609 panic("%s: lock (%s) %s not locked", __func__,
1610 lock->lo_class->lc_name, lock->lo_name);
1611 *filep = instance->li_file;
1612 *linep = instance->li_line;
1613}
1614
1615void
1616witness_restore(struct lock_object *lock, const char *file, int line)
1617{
1618 struct lock_instance *instance;
1619
1620 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1621 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1622 return;
1623 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
1624 panic("%s: lock (%s) %s is not a sleep lock", __func__,
1625 lock->lo_class->lc_name, lock->lo_name);
1626 instance = find_instance(curthread->td_sleeplocks, lock);
1627 if (instance == NULL)
1628 panic("%s: lock (%s) %s not locked", __func__,
1629 lock->lo_class->lc_name, lock->lo_name);
1630 lock->lo_witness->w_file = file;
1631 lock->lo_witness->w_line = line;
1632 instance->li_file = file;
1633 instance->li_line = line;
1634}
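
witness_save() and witness_restore() are used together to drop and retake
a sleep lock without disturbing the recorded acquisition point.  A minimal
sketch:

static void
example_drop_and_retake(struct mtx *mp)
{
	const char *file;
	int line;

	witness_save(&mp->mtx_object, &file, &line);
	mtx_unlock(mp);
	/* ... brief section with mp released ... */
	mtx_lock(mp);
	witness_restore(&mp->mtx_object, file, line);
}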
1635
1636void
1637witness_assert(struct lock_object *lock, int flags, const char *file, int line)
1638{
1639#ifdef INVARIANT_SUPPORT
1640 struct lock_instance *instance;
1641
1642 if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
1643 return;
1644 if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
1645 instance = find_instance(curthread->td_sleeplocks, lock);
1646 else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
1647 instance = find_instance(PCPU_GET(spinlocks), lock);
1648 else {
1649 panic("Lock (%s) %s is not sleep or spin!",
1650 lock->lo_class->lc_name, lock->lo_name);
1651 return;
1652 }
1653 file = fixup_filename(file);
1654 switch (flags) {
1655 case LA_UNLOCKED:
1656 if (instance != NULL)
1657 panic("Lock (%s) %s locked @ %s:%d.",
1658 lock->lo_class->lc_name, lock->lo_name, file, line);
1659 break;
1660 case LA_LOCKED:
1661 case LA_LOCKED | LA_RECURSED:
1662 case LA_LOCKED | LA_NOTRECURSED:
1663 case LA_SLOCKED:
1664 case LA_SLOCKED | LA_RECURSED:
1665 case LA_SLOCKED | LA_NOTRECURSED:
1666 case LA_XLOCKED:
1667 case LA_XLOCKED | LA_RECURSED:
1668 case LA_XLOCKED | LA_NOTRECURSED:
1669 if (instance == NULL) {
1670 panic("Lock (%s) %s not locked @ %s:%d.",
1671 lock->lo_class->lc_name, lock->lo_name, file, line);
1672 break;
1673 }
1674 if ((flags & LA_XLOCKED) != 0 &&
1675 (instance->li_flags & LI_EXCLUSIVE) == 0)
1676 panic("Lock (%s) %s not exclusively locked @ %s:%d.",
1677 lock->lo_class->lc_name, lock->lo_name, file, line);
1678 if ((flags & LA_SLOCKED) != 0 &&
1679 (instance->li_flags & LI_EXCLUSIVE) != 0)
1680 panic("Lock (%s) %s exclusively locked @ %s:%d.",
1681 lock->lo_class->lc_name, lock->lo_name, file, line);
1682 if ((flags & LA_RECURSED) != 0 &&
1683 (instance->li_flags & LI_RECURSEMASK) == 0)
1684 panic("Lock (%s) %s not recursed @ %s:%d.",
1685 lock->lo_class->lc_name, lock->lo_name, file, line);
1686 if ((flags & LA_NOTRECURSED) != 0 &&
1687 (instance->li_flags & LI_RECURSEMASK) != 0)
1688 panic("Lock (%s) %s recursed @ %s:%d.",
1689 lock->lo_class->lc_name, lock->lo_name, file, line);
1690 break;
1691 default:
1692 panic("Invalid lock assertion at %s:%d.", file, line);
1693
1694 }
1695#endif /* INVARIANT_SUPPORT */
1696}
1697
1698#ifdef DDB
1699static void
1700witness_list(struct thread *td)
1701{
1702
1703 KASSERT(!witness_cold, ("%s: witness_cold", __func__));
1704 KASSERT(db_active, ("%s: not in the debugger", __func__));
1705
1706 if (witness_watch == 0)
1707 return;
1708
1709 witness_list_locks(&td->td_sleeplocks);
1710
1711 /*
1712 * We only handle spinlocks if td == curthread. This is somewhat broken
1713 * if td is currently executing on some other CPU and holds spin locks
1714	 * as we won't display those locks.  If we had an MI way of getting
1715	 * the per-CPU data for a given CPU then we could use
1716 * td->td_kse->ke_oncpu to get the list of spinlocks for this thread
1717 * and "fix" this.
1718 *
1719 * That still wouldn't really fix this unless we locked sched_lock
1720 * or stopped the other CPU to make sure it wasn't changing the list
1721 * out from under us. It is probably best to just not try to handle
1722	 * threads on other CPUs for now.
1723 */
1724 if (td == curthread && PCPU_GET(spinlocks) != NULL)
1725 witness_list_locks(PCPU_PTR(spinlocks));
1726}
1727
1728DB_SHOW_COMMAND(locks, db_witness_list)
1729{
1730 struct thread *td;
1731 pid_t pid;
1732 struct proc *p;
1733
1734 if (have_addr) {
1735 pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
1736 ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
1737 ((addr >> 16) % 16) * 10000;
1738 /* sx_slock(&allproc_lock); */
1739 FOREACH_PROC_IN_SYSTEM(p) {
1740 if (p->p_pid == pid)
1741 break;
1742 }
1743 /* sx_sunlock(&allproc_lock); */
1744 if (p == NULL) {
1745 db_printf("pid %d not found\n", pid);
1746 return;
1747 }
1748 FOREACH_THREAD_IN_PROC(p, td) {
1749 witness_list(td);
1750 }
1751 } else {
1752 td = curthread;
1753 witness_list(td);
1754 }
1755}
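
The arithmetic on addr re-reads DDB's hex-parsed argument as decimal
digits, so "show locks 1234" really means pid 1234.  A standalone userland
sketch of the same trick:

#include <stdio.h>

/* DDB parses "1234" as 0x1234; each nibble is one decimal digit. */
static unsigned long
nibbles_as_decimal(unsigned long addr)
{
	return ((addr % 16) + ((addr >> 4) % 16) * 10 +
	    ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
	    ((addr >> 16) % 16) * 10000);
}

int
main(void)
{
	printf("%lu\n", nibbles_as_decimal(0x1234));	/* prints 1234 */
	return (0);
}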
1756
1757DB_SHOW_COMMAND(witness, db_witness_display)
1758{
1759
1760 witness_display(db_printf);
1761}
1762#endif