kern_mutex.c: revision 154482 → revision 154484
Each change hunk below lists the revision 154482 lines removed under "Deleted:" and the revision 154484 lines that replace them under "Added:"; unchanged context appears once, prefixed with its source line number.
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 */
31
32/*
33 * Machine independent bits of mutex implementation.
34 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 154482 2006-01-17 16:47:42Z jhb $");
37__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 154484 2006-01-17 16:55:17Z jhb $");
38
39#include "opt_adaptive_mutexes.h"
40#include "opt_ddb.h"
41#include "opt_mprof.h"
42#include "opt_mutex_wake_all.h"
43#include "opt_sched.h"
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/bus.h>
48#include <sys/conf.h>
49#include <sys/kdb.h>
50#include <sys/kernel.h>
51#include <sys/ktr.h>
52#include <sys/lock.h>
53#include <sys/malloc.h>
54#include <sys/mutex.h>
55#include <sys/proc.h>
56#include <sys/resourcevar.h>
57#include <sys/sched.h>
58#include <sys/sbuf.h>
59#include <sys/sysctl.h>
60#include <sys/turnstile.h>
61#include <sys/vmmeter.h>
62
63#include <machine/atomic.h>
64#include <machine/bus.h>
65#include <machine/clock.h>
66#include <machine/cpu.h>
67
68#include <ddb/ddb.h>
69
70#include <fs/devfs/devfs_int.h>
71
72#include <vm/vm.h>
73#include <vm/vm_extern.h>
74
75/*
76 * Force MUTEX_WAKE_ALL for now.
77 * single thread wakeup needs fixes to avoid race conditions with
78 * priority inheritance.
79 */
80#ifndef MUTEX_WAKE_ALL
81#define MUTEX_WAKE_ALL
82#endif
83
84/*
85 * Internal utility macros.
86 */
87#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
88
89#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
90 : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
91
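As background for the mtx_owner() macro above: the lock word carries the owning thread pointer with status bits packed into its low, alignment-guaranteed bits, and MTX_FLAGMASK strips them back off. A minimal user-space sketch of that packing (the flag values are illustrative assumptions, not copied from sys/mutex.h):

/* demo_mtx_owner.c: model of the owner/flag packing behind mtx_owner(). */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RECURSED  0x01 /* stand-in for MTX_RECURSED */
#define DEMO_CONTESTED 0x02 /* stand-in for MTX_CONTESTED */
#define DEMO_FLAGMASK  (~(uintptr_t)(DEMO_RECURSED | DEMO_CONTESTED))

int
main(void)
{
	uintptr_t owner = (uintptr_t)0xdeadbee0; /* an aligned "thread pointer" */
	uintptr_t word = owner | DEMO_CONTESTED; /* lock word of a contested lock */

	/* mtx_owner()-style recovery: mask the flag bits off the word. */
	printf("owner=%#jx flags=%#jx\n",
	    (uintmax_t)(word & DEMO_FLAGMASK),
	    (uintmax_t)(word & ~DEMO_FLAGMASK));
	return (0);
}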
92#ifdef DDB
93static void db_show_mtx(struct lock_object *lock);
94#endif
95
96/*
97 * Lock classes for sleep and spin mutexes.
98 */
99struct lock_class lock_class_mtx_sleep = {
100 "sleep mutex",
101 LC_SLEEPLOCK | LC_RECURSABLE,
102#ifdef DDB
103 db_show_mtx
104#endif
105};
106struct lock_class lock_class_mtx_spin = {
107 "spin mutex",
108 LC_SPINLOCK | LC_RECURSABLE,
109#ifdef DDB
110 db_show_mtx
111#endif
112};
113
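The assertions changed in this revision compare LOCK_CLASS(lock) against the addresses of these class objects instead of comparing a stored class index. A self-contained model of why the two tests agree (names and layout are assumptions that loosely mirror sys/lock.h of this era, not definitions taken from it):

#include <stdio.h>

struct lock_class { const char *lc_name; };
struct lock_object { int lo_classindex; }; /* the real index lives in lo_flags */

static struct lock_class class_mtx_spin = { "spin mutex" };
static struct lock_class class_mtx_sleep = { "sleep mutex" };
static struct lock_class *classes[] = { &class_mtx_spin, &class_mtx_sleep };

/* Model of LOCK_CLASS(): resolve a lock's class through the stored index. */
#define DEMO_LOCK_CLASS(lo) (classes[(lo)->lo_classindex])

int
main(void)
{
	struct lock_object lo = { 1 }; /* index 1 = "sleep mutex" here */

	/* The index test and the pointer test answer the same question. */
	printf("%s, sleep? %d\n", DEMO_LOCK_CLASS(&lo)->lc_name,
	    DEMO_LOCK_CLASS(&lo) == &class_mtx_sleep);
	return (0);
}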
114/*
115 * System-wide mutexes
116 */
117struct mtx sched_lock;
118struct mtx Giant;
119
120#ifdef MUTEX_PROFILING
121SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
122SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
123static int mutex_prof_enable = 0;
124SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
125 &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
126
127struct mutex_prof {
128 const char *name;
129 const char *file;
130 int line;
131 uintmax_t cnt_max;
132 uintmax_t cnt_tot;
133 uintmax_t cnt_cur;
134 uintmax_t cnt_contest_holding;
135 uintmax_t cnt_contest_locking;
136 struct mutex_prof *next;
137};
138
139/*
140 * mprof_buf is a static pool of profiling records to avoid possible
141 * reentrance of the memory allocation functions.
142 *
143 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
144 */
145#ifdef MPROF_BUFFERS
146#define NUM_MPROF_BUFFERS MPROF_BUFFERS
147#else
148#define NUM_MPROF_BUFFERS 1000
149#endif
150static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
151static int first_free_mprof_buf;
152#ifndef MPROF_HASH_SIZE
153#define MPROF_HASH_SIZE 1009
154#endif
155#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
 156#error MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
157#endif
158static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
159/* SWAG: sbuf size = avg stat. line size * number of locks */
160#define MPROF_SBUF_SIZE 256 * 400
161
162static int mutex_prof_acquisitions;
163SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
164 &mutex_prof_acquisitions, 0, "Number of mutex acquistions recorded");
165static int mutex_prof_records;
166SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
167 &mutex_prof_records, 0, "Number of profiling records");
168static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
169SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
170 &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
171static int mutex_prof_rejected;
172SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
173 &mutex_prof_rejected, 0, "Number of rejected profiling records");
174static int mutex_prof_hashsize = MPROF_HASH_SIZE;
175SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
176 &mutex_prof_hashsize, 0, "Hash size");
177static int mutex_prof_collisions = 0;
178SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
179 &mutex_prof_collisions, 0, "Number of hash collisions");
180
181/*
182 * mprof_mtx protects the profiling buffers and the hash.
183 */
184static struct mtx mprof_mtx;
185MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
186
187static u_int64_t
188nanoseconds(void)
189{
190 struct timespec tv;
191
192 nanotime(&tv);
193 return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
194}
195
196static int
197dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
198{
199 struct sbuf *sb;
200 int error, i;
201 static int multiplier = 1;
202
203 if (first_free_mprof_buf == 0)
204 return (SYSCTL_OUT(req, "No locking recorded",
205 sizeof("No locking recorded")));
206
207retry_sbufops:
208 sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
209 sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
210 "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
211 /*
212 * XXX this spinlock seems to be by far the largest perpetrator
213 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
214 * even before I pessimized it further by moving the average
215 * computation here).
216 */
217 mtx_lock_spin(&mprof_mtx);
218 for (i = 0; i < first_free_mprof_buf; ++i) {
219 sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
220 mprof_buf[i].cnt_max / 1000,
221 mprof_buf[i].cnt_tot / 1000,
222 mprof_buf[i].cnt_cur,
223 mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
224 mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
225 mprof_buf[i].cnt_contest_holding,
226 mprof_buf[i].cnt_contest_locking,
227 mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
228 if (sbuf_overflowed(sb)) {
229 mtx_unlock_spin(&mprof_mtx);
230 sbuf_delete(sb);
231 multiplier++;
232 goto retry_sbufops;
233 }
234 }
235 mtx_unlock_spin(&mprof_mtx);
236 sbuf_finish(sb);
237 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
238 sbuf_delete(sb);
239 return (error);
240}
241SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
242 NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
243
244static int
245reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
246{
247 int error, v;
248
249 if (first_free_mprof_buf == 0)
250 return (0);
251
252 v = 0;
253 error = sysctl_handle_int(oidp, &v, 0, req);
254 if (error)
255 return (error);
256 if (req->newptr == NULL)
257 return (error);
258 if (v == 0)
259 return (0);
260
261 mtx_lock_spin(&mprof_mtx);
262 bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
263 bzero(mprof_hash, sizeof(struct mtx *) * MPROF_HASH_SIZE);
264 first_free_mprof_buf = 0;
265 mtx_unlock_spin(&mprof_mtx);
266 return (0);
267}
268SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
269 NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
270#endif
271
272/*
273 * Function versions of the inlined __mtx_* macros. These are used by
274 * modules and can also be called from assembly language if needed.
275 */
276void
277_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
278{
279
280 MPASS(curthread != NULL);
Deleted:
 281 KASSERT(LO_CLASSINDEX(&m->mtx_object) == LOCK_CLASS_SLEEP_MUTEX,
Added:
 281 KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
282 ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
283 file, line));
284 WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
285 file, line);
286 _get_sleep_lock(m, curthread, opts, file, line);
287 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
288 line);
289 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
290#ifdef MUTEX_PROFILING
291 /* don't reset the timer when/if recursing */
292 if (m->mtx_acqtime == 0) {
293 m->mtx_filename = file;
294 m->mtx_lineno = line;
295 m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
296 ++mutex_prof_acquisitions;
297 }
298#endif
299}
300
301void
302_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
303{
304
305 MPASS(curthread != NULL);
282 ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
283 file, line));
284 WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
285 file, line);
286 _get_sleep_lock(m, curthread, opts, file, line);
287 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
288 line);
289 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
290#ifdef MUTEX_PROFILING
291 /* don't reset the timer when/if recursing */
292 if (m->mtx_acqtime == 0) {
293 m->mtx_filename = file;
294 m->mtx_lineno = line;
295 m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
296 ++mutex_prof_acquisitions;
297 }
298#endif
299}
300
301void
302_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
303{
304
305 MPASS(curthread != NULL);
Deleted:
 306 KASSERT(LO_CLASSINDEX(&m->mtx_object) == LOCK_CLASS_SLEEP_MUTEX,
Added:
 306 KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
307 ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
308 file, line));
309 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
310 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
311 line);
312 mtx_assert(m, MA_OWNED);
313#ifdef MUTEX_PROFILING
314 if (m->mtx_acqtime != 0) {
315 static const char *unknown = "(unknown)";
316 struct mutex_prof *mpp;
317 u_int64_t acqtime, now;
318 const char *p, *q;
319 volatile u_int hash;
320
321 now = nanoseconds();
322 acqtime = m->mtx_acqtime;
323 m->mtx_acqtime = 0;
324 if (now <= acqtime)
325 goto out;
326 for (p = m->mtx_filename;
327 p != NULL && strncmp(p, "../", 3) == 0; p += 3)
328 /* nothing */ ;
329 if (p == NULL || *p == '\0')
330 p = unknown;
331 for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
332 hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
333 mtx_lock_spin(&mprof_mtx);
334 for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
335 if (mpp->line == m->mtx_lineno &&
336 strcmp(mpp->file, p) == 0)
337 break;
338 if (mpp == NULL) {
339 /* Just exit if we cannot get a trace buffer */
340 if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
341 ++mutex_prof_rejected;
342 goto unlock;
343 }
344 mpp = &mprof_buf[first_free_mprof_buf++];
345 mpp->name = mtx_name(m);
346 mpp->file = p;
347 mpp->line = m->mtx_lineno;
348 mpp->next = mprof_hash[hash];
349 if (mprof_hash[hash] != NULL)
350 ++mutex_prof_collisions;
351 mprof_hash[hash] = mpp;
352 ++mutex_prof_records;
353 }
354 /*
355 * Record if the mutex has been held longer now than ever
356 * before.
357 */
358 if (now - acqtime > mpp->cnt_max)
359 mpp->cnt_max = now - acqtime;
360 mpp->cnt_tot += now - acqtime;
361 mpp->cnt_cur++;
362 /*
363 * There's a small race, really we should cmpxchg
364 * 0 with the current value, but that would bill
365 * the contention to the wrong lock instance if
366 * it followed this also.
367 */
368 mpp->cnt_contest_holding += m->mtx_contest_holding;
369 m->mtx_contest_holding = 0;
370 mpp->cnt_contest_locking += m->mtx_contest_locking;
371 m->mtx_contest_locking = 0;
372unlock:
373 mtx_unlock_spin(&mprof_mtx);
374 }
375out:
376#endif
377 _rel_sleep_lock(m, curthread, opts, file, line);
378}
379
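The profiling path in _mtx_unlock_flags() above keys its records on a rolling hash of file and line; the same scheme can be exercised stand-alone (an illustrative user-space rendering, not kernel code):

#include <stdio.h>

#define MPROF_HASH_SIZE 1009 /* same table size as above */

static unsigned int
mprof_hash(const char *file, int line)
{
	unsigned int hash;
	const char *q;

	/* Seed with the line number, then fold in each filename byte. */
	for (hash = (unsigned int)line, q = file; *q != '\0'; ++q)
		hash = (hash * 2 + (unsigned char)*q) % MPROF_HASH_SIZE;
	return (hash);
}

int
main(void)
{
	printf("%u\n", mprof_hash("kern/kern_mutex.c", 123));
	return (0);
}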
380void
381_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
382{
383
384 MPASS(curthread != NULL);
307 ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
308 file, line));
309 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
310 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
311 line);
312 mtx_assert(m, MA_OWNED);
313#ifdef MUTEX_PROFILING
314 if (m->mtx_acqtime != 0) {
315 static const char *unknown = "(unknown)";
316 struct mutex_prof *mpp;
317 u_int64_t acqtime, now;
318 const char *p, *q;
319 volatile u_int hash;
320
321 now = nanoseconds();
322 acqtime = m->mtx_acqtime;
323 m->mtx_acqtime = 0;
324 if (now <= acqtime)
325 goto out;
326 for (p = m->mtx_filename;
327 p != NULL && strncmp(p, "../", 3) == 0; p += 3)
328 /* nothing */ ;
329 if (p == NULL || *p == '\0')
330 p = unknown;
331 for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
332 hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
333 mtx_lock_spin(&mprof_mtx);
334 for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
335 if (mpp->line == m->mtx_lineno &&
336 strcmp(mpp->file, p) == 0)
337 break;
338 if (mpp == NULL) {
339 /* Just exit if we cannot get a trace buffer */
340 if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
341 ++mutex_prof_rejected;
342 goto unlock;
343 }
344 mpp = &mprof_buf[first_free_mprof_buf++];
345 mpp->name = mtx_name(m);
346 mpp->file = p;
347 mpp->line = m->mtx_lineno;
348 mpp->next = mprof_hash[hash];
349 if (mprof_hash[hash] != NULL)
350 ++mutex_prof_collisions;
351 mprof_hash[hash] = mpp;
352 ++mutex_prof_records;
353 }
354 /*
355 * Record if the mutex has been held longer now than ever
356 * before.
357 */
358 if (now - acqtime > mpp->cnt_max)
359 mpp->cnt_max = now - acqtime;
360 mpp->cnt_tot += now - acqtime;
361 mpp->cnt_cur++;
362 /*
363 * There's a small race, really we should cmpxchg
364 * 0 with the current value, but that would bill
365 * the contention to the wrong lock instance if
366 * it followed this also.
367 */
368 mpp->cnt_contest_holding += m->mtx_contest_holding;
369 m->mtx_contest_holding = 0;
370 mpp->cnt_contest_locking += m->mtx_contest_locking;
371 m->mtx_contest_locking = 0;
372unlock:
373 mtx_unlock_spin(&mprof_mtx);
374 }
375out:
376#endif
377 _rel_sleep_lock(m, curthread, opts, file, line);
378}
379
380void
381_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
382{
383
384 MPASS(curthread != NULL);
Deleted:
 385 KASSERT(LO_CLASSINDEX(&m->mtx_object) == LOCK_CLASS_SPIN_MUTEX,
Added:
 385 KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
386 ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
387 m->mtx_object.lo_name, file, line));
388 WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
389 file, line);
390 _get_spin_lock(m, curthread, opts, file, line);
391 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
392 line);
393 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
394}
395
396void
397_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
398{
399
400 MPASS(curthread != NULL);
386 ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
387 m->mtx_object.lo_name, file, line));
388 WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
389 file, line);
390 _get_spin_lock(m, curthread, opts, file, line);
391 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
392 line);
393 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
394}
395
396void
397_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
398{
399
400 MPASS(curthread != NULL);
Deleted:
 401 KASSERT(LO_CLASSINDEX(&m->mtx_object) == LOCK_CLASS_SPIN_MUTEX,
Added:
 401 KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
402 ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
403 m->mtx_object.lo_name, file, line));
404 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
405 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
406 line);
407 mtx_assert(m, MA_OWNED);
408 _rel_spin_lock(m);
409}
410
411/*
412 * The important part of mtx_trylock{,_flags}()
413 * Tries to acquire lock `m.' If this function is called on a mutex that
414 * is already owned, it will recursively acquire the lock.
415 */
416int
417_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
418{
419 int rval;
420
421 MPASS(curthread != NULL);
402 ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
403 m->mtx_object.lo_name, file, line));
404 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
405 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
406 line);
407 mtx_assert(m, MA_OWNED);
408 _rel_spin_lock(m);
409}
410
411/*
412 * The important part of mtx_trylock{,_flags}()
413 * Tries to acquire lock `m.' If this function is called on a mutex that
414 * is already owned, it will recursively acquire the lock.
415 */
416int
417_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
418{
419 int rval;
420
421 MPASS(curthread != NULL);
Deleted:
 422 KASSERT(LO_CLASSINDEX(&m->mtx_object) == LOCK_CLASS_SLEEP_MUTEX,
Added:
 422 KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
423 ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
424 file, line));
425
426 if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
427 m->mtx_recurse++;
428 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
429 rval = 1;
430 } else
431 rval = _obtain_lock(m, (uintptr_t)curthread);
432
433 LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
434 if (rval)
435 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
436 file, line);
437
438 return (rval);
439}
440
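A typical consumer of mtx_trylock() backs off rather than sleeping when the lock is held; a hedged kernel-side sketch (struct softc and its fields are hypothetical, not from this file):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct softc { /* hypothetical driver state */
	struct mtx sc_mtx;
	int sc_count;
};

static int
softc_try_update(struct softc *sc)
{
	if (!mtx_trylock(&sc->sc_mtx))
		return (EBUSY); /* contended: let the caller retry later */
	sc->sc_count++; /* sc_count is protected by sc_mtx */
	mtx_unlock(&sc->sc_mtx);
	return (0);
}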
441/*
442 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
443 *
444 * We call this if the lock is either contested (i.e. we need to go to
445 * sleep waiting for it), or if we need to recurse on it.
446 */
447void
448_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
449 int line)
450{
451#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
452 struct thread *owner;
453#endif
454 uintptr_t v;
455#ifdef KTR
456 int cont_logged = 0;
457#endif
458#ifdef MUTEX_PROFILING
459 int contested;
460#endif
461
462 if (mtx_owned(m)) {
463 KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
464 ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
465 m->mtx_object.lo_name, file, line));
466 m->mtx_recurse++;
467 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
468 if (LOCK_LOG_TEST(&m->mtx_object, opts))
469 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
470 return;
471 }
472
473 if (LOCK_LOG_TEST(&m->mtx_object, opts))
474 CTR4(KTR_LOCK,
475 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
476 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
477
478#ifdef MUTEX_PROFILING
479 contested = 0;
480#endif
481 while (!_obtain_lock(m, tid)) {
482#ifdef MUTEX_PROFILING
483 contested = 1;
484 atomic_add_int(&m->mtx_contest_holding, 1);
485#endif
486 turnstile_lock(&m->mtx_object);
487 v = m->mtx_lock;
488
489 /*
490 * Check if the lock has been released while spinning for
491 * the turnstile chain lock.
492 */
493 if (v == MTX_UNOWNED) {
494 turnstile_release(&m->mtx_object);
495 cpu_spinwait();
496 continue;
497 }
498
499#ifdef MUTEX_WAKE_ALL
500 MPASS(v != MTX_CONTESTED);
501#else
502 /*
503 * The mutex was marked contested on release. This means that
504 * there are other threads blocked on it. Grab ownership of
505 * it and propagate its priority to the current thread if
506 * necessary.
507 */
508 if (v == MTX_CONTESTED) {
509 m->mtx_lock = tid | MTX_CONTESTED;
510 turnstile_claim(&m->mtx_object);
511 break;
512 }
513#endif
514
515 /*
516 * If the mutex isn't already contested and a failure occurs
517 * setting the contested bit, the mutex was either released
518 * or the state of the MTX_RECURSED bit changed.
519 */
520 if ((v & MTX_CONTESTED) == 0 &&
521 !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
522 turnstile_release(&m->mtx_object);
523 cpu_spinwait();
524 continue;
525 }
526
527#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
528 /*
529 * If the current owner of the lock is executing on another
530 * CPU, spin instead of blocking.
531 */
532 owner = (struct thread *)(v & MTX_FLAGMASK);
533#ifdef ADAPTIVE_GIANT
534 if (TD_IS_RUNNING(owner)) {
535#else
536 if (m != &Giant && TD_IS_RUNNING(owner)) {
537#endif
538 turnstile_release(&m->mtx_object);
539 while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
540 cpu_spinwait();
541 }
542 continue;
543 }
544#endif /* SMP && !NO_ADAPTIVE_MUTEXES */
545
546 /*
547 * We definitely must sleep for this lock.
548 */
549 mtx_assert(m, MA_NOTOWNED);
550
551#ifdef KTR
552 if (!cont_logged) {
553 CTR6(KTR_CONTENTION,
554 "contention: %p at %s:%d wants %s, taken by %s:%d",
555 (void *)tid, file, line, m->mtx_object.lo_name,
556 WITNESS_FILE(&m->mtx_object),
557 WITNESS_LINE(&m->mtx_object));
558 cont_logged = 1;
559 }
560#endif
561
562 /*
563 * Block on the turnstile.
564 */
565 turnstile_wait(&m->mtx_object, mtx_owner(m));
566 }
567
568#ifdef KTR
569 if (cont_logged) {
570 CTR4(KTR_CONTENTION,
571 "contention end: %s acquired by %p at %s:%d",
572 m->mtx_object.lo_name, (void *)tid, file, line);
573 }
574#endif
575#ifdef MUTEX_PROFILING
576 if (contested)
577 m->mtx_contest_locking++;
578 m->mtx_contest_holding = 0;
579#endif
580 return;
581}
582
583#ifdef SMP
584/*
585 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
586 *
587 * This is only called if we need to actually spin for the lock. Recursion
588 * is handled inline.
589 */
590void
591_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
592 int line)
593{
594 int i = 0;
595
596 if (LOCK_LOG_TEST(&m->mtx_object, opts))
597 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
598
599 for (;;) {
600 if (_obtain_lock(m, tid))
601 break;
602
603 /* Give interrupts a chance while we spin. */
604 spinlock_exit();
605 while (m->mtx_lock != MTX_UNOWNED) {
606 if (i++ < 10000000) {
607 cpu_spinwait();
608 continue;
609 }
610 if (i < 60000000)
611 DELAY(1);
612 else if (!kdb_active && !panicstr) {
613 printf("spin lock %s held by %p for > 5 seconds\n",
614 m->mtx_object.lo_name, (void *)m->mtx_lock);
615#ifdef WITNESS
616 witness_display_spinlock(&m->mtx_object,
617 mtx_owner(m));
618#endif
619 panic("spin lock held too long");
620 }
621 cpu_spinwait();
622 }
623 spinlock_enter();
624 }
625
626 if (LOCK_LOG_TEST(&m->mtx_object, opts))
627 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
628
629 return;
630}
631#endif /* SMP */
632
633/*
634 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
635 *
636 * We are only called here if the lock is recursed or contested (i.e. we
637 * need to wake up a blocked thread).
638 */
639void
640_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
641{
642 struct turnstile *ts;
643#ifndef PREEMPTION
644 struct thread *td, *td1;
645#endif
646
647 if (mtx_recursed(m)) {
648 if (--(m->mtx_recurse) == 0)
649 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
650 if (LOCK_LOG_TEST(&m->mtx_object, opts))
651 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
652 return;
653 }
654
655 turnstile_lock(&m->mtx_object);
656 ts = turnstile_lookup(&m->mtx_object);
657 if (LOCK_LOG_TEST(&m->mtx_object, opts))
658 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
659
660#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
661 if (ts == NULL) {
662 _release_lock_quick(m);
663 if (LOCK_LOG_TEST(&m->mtx_object, opts))
664 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
665 turnstile_release(&m->mtx_object);
666 return;
667 }
668#else
669 MPASS(ts != NULL);
670#endif
671#ifndef PREEMPTION
672 /* XXX */
673 td1 = turnstile_head(ts);
674#endif
675#ifdef MUTEX_WAKE_ALL
676 turnstile_broadcast(ts);
677 _release_lock_quick(m);
678#else
679 if (turnstile_signal(ts)) {
680 _release_lock_quick(m);
681 if (LOCK_LOG_TEST(&m->mtx_object, opts))
682 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
683 } else {
684 m->mtx_lock = MTX_CONTESTED;
685 if (LOCK_LOG_TEST(&m->mtx_object, opts))
686 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
687 m);
688 }
689#endif
690 turnstile_unpend(ts);
691
692#ifndef PREEMPTION
693 /*
694 * XXX: This is just a hack until preemption is done. However,
695 * once preemption is done we need to either wrap the
696 * turnstile_signal() and release of the actual lock in an
697 * extra critical section or change the preemption code to
698 * always just set a flag and never do instant-preempts.
699 */
700 td = curthread;
701 if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
702 return;
703 mtx_lock_spin(&sched_lock);
704 if (!TD_IS_RUNNING(td1)) {
705#ifdef notyet
706 if (td->td_ithd != NULL) {
707 struct ithd *it = td->td_ithd;
708
709 if (it->it_interrupted) {
710 if (LOCK_LOG_TEST(&m->mtx_object, opts))
711 CTR2(KTR_LOCK,
712 "_mtx_unlock_sleep: %p interrupted %p",
713 it, it->it_interrupted);
714 intr_thd_fixup(it);
715 }
716 }
717#endif
718 if (LOCK_LOG_TEST(&m->mtx_object, opts))
719 CTR2(KTR_LOCK,
720 "_mtx_unlock_sleep: %p switching out lock=%p", m,
721 (void *)m->mtx_lock);
722
723 mi_switch(SW_INVOL, NULL);
724 if (LOCK_LOG_TEST(&m->mtx_object, opts))
725 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
726 m, (void *)m->mtx_lock);
727 }
728 mtx_unlock_spin(&sched_lock);
729#endif
730
731 return;
732}
733
734/*
735 * All the unlocking of MTX_SPIN locks is done inline.
736 * See the _rel_spin_lock() macro for the details.
737 */
738
739/*
740 * The backing function for the INVARIANTS-enabled mtx_assert()
741 */
742#ifdef INVARIANT_SUPPORT
743void
744_mtx_assert(struct mtx *m, int what, const char *file, int line)
745{
746
747 if (panicstr != NULL || dumping)
748 return;
749 switch (what) {
750 case MA_OWNED:
751 case MA_OWNED | MA_RECURSED:
752 case MA_OWNED | MA_NOTRECURSED:
753 if (!mtx_owned(m))
754 panic("mutex %s not owned at %s:%d",
755 m->mtx_object.lo_name, file, line);
756 if (mtx_recursed(m)) {
757 if ((what & MA_NOTRECURSED) != 0)
758 panic("mutex %s recursed at %s:%d",
759 m->mtx_object.lo_name, file, line);
760 } else if ((what & MA_RECURSED) != 0) {
761 panic("mutex %s unrecursed at %s:%d",
762 m->mtx_object.lo_name, file, line);
763 }
764 break;
765 case MA_NOTOWNED:
766 if (mtx_owned(m))
767 panic("mutex %s owned at %s:%d",
768 m->mtx_object.lo_name, file, line);
769 break;
770 default:
771 panic("unknown mtx_assert at %s:%d", file, line);
772 }
773}
774#endif
775
776/*
777 * The MUTEX_DEBUG-enabled mtx_validate()
778 *
779 * Most of these checks have been moved off into the LO_INITIALIZED flag
780 * maintained by the witness code.
781 */
782#ifdef MUTEX_DEBUG
783
784void mtx_validate(struct mtx *);
785
786void
787mtx_validate(struct mtx *m)
788{
789
790/*
791 * XXX: When kernacc() does not require Giant we can reenable this check
792 */
793#ifdef notyet
794/*
795 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
796 * we can re-enable the kernacc() checks.
797 */
798#ifndef __alpha__
799 /*
800 * Can't call kernacc() from early init386(), especially when
801 * initializing Giant mutex, because some stuff in kernacc()
802 * requires Giant itself.
803 */
804 if (!cold)
805 if (!kernacc((caddr_t)m, sizeof(m),
806 VM_PROT_READ | VM_PROT_WRITE))
807 panic("Can't read and write to mutex %p", m);
808#endif
809#endif
810}
811#endif
812
813/*
814 * General init routine used by the MTX_SYSINIT() macro.
815 */
816void
817mtx_sysinit(void *arg)
818{
819 struct mtx_args *margs = arg;
820
821 mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
822}
823
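mtx_sysinit() is the boot-time hook behind the MTX_SYSINIT() macro used for mprof_mtx above; a typical static declaration looks like this (hypothetical names):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* foo_mtx is initialized via mtx_sysinit() during SYSINIT processing. */
static struct mtx foo_mtx;
MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo lock", MTX_DEF);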
824/*
825 * Mutex initialization routine; initialize lock `m' of type contained in
826 * `opts' with options contained in `opts' and name `name.' The optional
827 * lock type `type' is used as a general lock category name for use with
828 * witness.
829 */
830void
831mtx_init(struct mtx *m, const char *name, const char *type, int opts)
832{
423 ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
424 file, line));
425
426 if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
427 m->mtx_recurse++;
428 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
429 rval = 1;
430 } else
431 rval = _obtain_lock(m, (uintptr_t)curthread);
432
433 LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
434 if (rval)
435 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
436 file, line);
437
438 return (rval);
439}
440
441/*
442 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
443 *
444 * We call this if the lock is either contested (i.e. we need to go to
445 * sleep waiting for it), or if we need to recurse on it.
446 */
447void
448_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
449 int line)
450{
451#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
452 struct thread *owner;
453#endif
454 uintptr_t v;
455#ifdef KTR
456 int cont_logged = 0;
457#endif
458#ifdef MUTEX_PROFILING
459 int contested;
460#endif
461
462 if (mtx_owned(m)) {
463 KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
464 ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
465 m->mtx_object.lo_name, file, line));
466 m->mtx_recurse++;
467 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
468 if (LOCK_LOG_TEST(&m->mtx_object, opts))
469 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
470 return;
471 }
472
473 if (LOCK_LOG_TEST(&m->mtx_object, opts))
474 CTR4(KTR_LOCK,
475 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
476 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
477
478#ifdef MUTEX_PROFILING
479 contested = 0;
480#endif
481 while (!_obtain_lock(m, tid)) {
482#ifdef MUTEX_PROFILING
483 contested = 1;
484 atomic_add_int(&m->mtx_contest_holding, 1);
485#endif
486 turnstile_lock(&m->mtx_object);
487 v = m->mtx_lock;
488
489 /*
490 * Check if the lock has been released while spinning for
491 * the turnstile chain lock.
492 */
493 if (v == MTX_UNOWNED) {
494 turnstile_release(&m->mtx_object);
495 cpu_spinwait();
496 continue;
497 }
498
499#ifdef MUTEX_WAKE_ALL
500 MPASS(v != MTX_CONTESTED);
501#else
502 /*
503 * The mutex was marked contested on release. This means that
504 * there are other threads blocked on it. Grab ownership of
505 * it and propagate its priority to the current thread if
506 * necessary.
507 */
508 if (v == MTX_CONTESTED) {
509 m->mtx_lock = tid | MTX_CONTESTED;
510 turnstile_claim(&m->mtx_object);
511 break;
512 }
513#endif
514
515 /*
516 * If the mutex isn't already contested and a failure occurs
517 * setting the contested bit, the mutex was either released
518 * or the state of the MTX_RECURSED bit changed.
519 */
520 if ((v & MTX_CONTESTED) == 0 &&
521 !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
522 turnstile_release(&m->mtx_object);
523 cpu_spinwait();
524 continue;
525 }
526
527#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
528 /*
529 * If the current owner of the lock is executing on another
530 * CPU, spin instead of blocking.
531 */
532 owner = (struct thread *)(v & MTX_FLAGMASK);
533#ifdef ADAPTIVE_GIANT
534 if (TD_IS_RUNNING(owner)) {
535#else
536 if (m != &Giant && TD_IS_RUNNING(owner)) {
537#endif
538 turnstile_release(&m->mtx_object);
539 while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
540 cpu_spinwait();
541 }
542 continue;
543 }
544#endif /* SMP && !NO_ADAPTIVE_MUTEXES */
545
546 /*
547 * We definitely must sleep for this lock.
548 */
549 mtx_assert(m, MA_NOTOWNED);
550
551#ifdef KTR
552 if (!cont_logged) {
553 CTR6(KTR_CONTENTION,
554 "contention: %p at %s:%d wants %s, taken by %s:%d",
555 (void *)tid, file, line, m->mtx_object.lo_name,
556 WITNESS_FILE(&m->mtx_object),
557 WITNESS_LINE(&m->mtx_object));
558 cont_logged = 1;
559 }
560#endif
561
562 /*
563 * Block on the turnstile.
564 */
565 turnstile_wait(&m->mtx_object, mtx_owner(m));
566 }
567
568#ifdef KTR
569 if (cont_logged) {
570 CTR4(KTR_CONTENTION,
571 "contention end: %s acquired by %p at %s:%d",
572 m->mtx_object.lo_name, (void *)tid, file, line);
573 }
574#endif
575#ifdef MUTEX_PROFILING
576 if (contested)
577 m->mtx_contest_locking++;
578 m->mtx_contest_holding = 0;
579#endif
580 return;
581}
582
583#ifdef SMP
584/*
585 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
586 *
587 * This is only called if we need to actually spin for the lock. Recursion
588 * is handled inline.
589 */
590void
591_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
592 int line)
593{
594 int i = 0;
595
596 if (LOCK_LOG_TEST(&m->mtx_object, opts))
597 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
598
599 for (;;) {
600 if (_obtain_lock(m, tid))
601 break;
602
603 /* Give interrupts a chance while we spin. */
604 spinlock_exit();
605 while (m->mtx_lock != MTX_UNOWNED) {
606 if (i++ < 10000000) {
607 cpu_spinwait();
608 continue;
609 }
610 if (i < 60000000)
611 DELAY(1);
612 else if (!kdb_active && !panicstr) {
613 printf("spin lock %s held by %p for > 5 seconds\n",
614 m->mtx_object.lo_name, (void *)m->mtx_lock);
615#ifdef WITNESS
616 witness_display_spinlock(&m->mtx_object,
617 mtx_owner(m));
618#endif
619 panic("spin lock held too long");
620 }
621 cpu_spinwait();
622 }
623 spinlock_enter();
624 }
625
626 if (LOCK_LOG_TEST(&m->mtx_object, opts))
627 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
628
629 return;
630}
631#endif /* SMP */
632
633/*
634 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
635 *
636 * We are only called here if the lock is recursed or contested (i.e. we
637 * need to wake up a blocked thread).
638 */
639void
640_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
641{
642 struct turnstile *ts;
643#ifndef PREEMPTION
644 struct thread *td, *td1;
645#endif
646
647 if (mtx_recursed(m)) {
648 if (--(m->mtx_recurse) == 0)
649 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
650 if (LOCK_LOG_TEST(&m->mtx_object, opts))
651 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
652 return;
653 }
654
655 turnstile_lock(&m->mtx_object);
656 ts = turnstile_lookup(&m->mtx_object);
657 if (LOCK_LOG_TEST(&m->mtx_object, opts))
658 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
659
660#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
661 if (ts == NULL) {
662 _release_lock_quick(m);
663 if (LOCK_LOG_TEST(&m->mtx_object, opts))
664 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
665 turnstile_release(&m->mtx_object);
666 return;
667 }
668#else
669 MPASS(ts != NULL);
670#endif
671#ifndef PREEMPTION
672 /* XXX */
673 td1 = turnstile_head(ts);
674#endif
675#ifdef MUTEX_WAKE_ALL
676 turnstile_broadcast(ts);
677 _release_lock_quick(m);
678#else
679 if (turnstile_signal(ts)) {
680 _release_lock_quick(m);
681 if (LOCK_LOG_TEST(&m->mtx_object, opts))
682 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
683 } else {
684 m->mtx_lock = MTX_CONTESTED;
685 if (LOCK_LOG_TEST(&m->mtx_object, opts))
686 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
687 m);
688 }
689#endif
690 turnstile_unpend(ts);
691
692#ifndef PREEMPTION
693 /*
694 * XXX: This is just a hack until preemption is done. However,
695 * once preemption is done we need to either wrap the
696 * turnstile_signal() and release of the actual lock in an
697 * extra critical section or change the preemption code to
698 * always just set a flag and never do instant-preempts.
699 */
700 td = curthread;
701 if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
702 return;
703 mtx_lock_spin(&sched_lock);
704 if (!TD_IS_RUNNING(td1)) {
705#ifdef notyet
706 if (td->td_ithd != NULL) {
707 struct ithd *it = td->td_ithd;
708
709 if (it->it_interrupted) {
710 if (LOCK_LOG_TEST(&m->mtx_object, opts))
711 CTR2(KTR_LOCK,
712 "_mtx_unlock_sleep: %p interrupted %p",
713 it, it->it_interrupted);
714 intr_thd_fixup(it);
715 }
716 }
717#endif
718 if (LOCK_LOG_TEST(&m->mtx_object, opts))
719 CTR2(KTR_LOCK,
720 "_mtx_unlock_sleep: %p switching out lock=%p", m,
721 (void *)m->mtx_lock);
722
723 mi_switch(SW_INVOL, NULL);
724 if (LOCK_LOG_TEST(&m->mtx_object, opts))
725 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
726 m, (void *)m->mtx_lock);
727 }
728 mtx_unlock_spin(&sched_lock);
729#endif
730
731 return;
732}
733
734/*
735 * All the unlocking of MTX_SPIN locks is done inline.
736 * See the _rel_spin_lock() macro for the details.
737 */
738
739/*
740 * The backing function for the INVARIANTS-enabled mtx_assert()
741 */
742#ifdef INVARIANT_SUPPORT
743void
744_mtx_assert(struct mtx *m, int what, const char *file, int line)
745{
746
747 if (panicstr != NULL || dumping)
748 return;
749 switch (what) {
750 case MA_OWNED:
751 case MA_OWNED | MA_RECURSED:
752 case MA_OWNED | MA_NOTRECURSED:
753 if (!mtx_owned(m))
754 panic("mutex %s not owned at %s:%d",
755 m->mtx_object.lo_name, file, line);
756 if (mtx_recursed(m)) {
757 if ((what & MA_NOTRECURSED) != 0)
758 panic("mutex %s recursed at %s:%d",
759 m->mtx_object.lo_name, file, line);
760 } else if ((what & MA_RECURSED) != 0) {
761 panic("mutex %s unrecursed at %s:%d",
762 m->mtx_object.lo_name, file, line);
763 }
764 break;
765 case MA_NOTOWNED:
766 if (mtx_owned(m))
767 panic("mutex %s owned at %s:%d",
768 m->mtx_object.lo_name, file, line);
769 break;
770 default:
771 panic("unknown mtx_assert at %s:%d", file, line);
772 }
773}
774#endif
775
776/*
777 * The MUTEX_DEBUG-enabled mtx_validate()
778 *
779 * Most of these checks have been moved off into the LO_INITIALIZED flag
780 * maintained by the witness code.
781 */
782#ifdef MUTEX_DEBUG
783
784void mtx_validate(struct mtx *);
785
786void
787mtx_validate(struct mtx *m)
788{
789
790/*
791 * XXX: When kernacc() does not require Giant we can reenable this check
792 */
793#ifdef notyet
794/*
795 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
796 * we can re-enable the kernacc() checks.
797 */
798#ifndef __alpha__
799 /*
800 * Can't call kernacc() from early init386(), especially when
801 * initializing Giant mutex, because some stuff in kernacc()
802 * requires Giant itself.
803 */
804 if (!cold)
805 if (!kernacc((caddr_t)m, sizeof(m),
806 VM_PROT_READ | VM_PROT_WRITE))
807 panic("Can't read and write to mutex %p", m);
808#endif
809#endif
810}
811#endif
812
813/*
814 * General init routine used by the MTX_SYSINIT() macro.
815 */
816void
817mtx_sysinit(void *arg)
818{
819 struct mtx_args *margs = arg;
820
821 mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
822}
823
824/*
825 * Mutex initialization routine; initialize lock `m' of type contained in
826 * `opts' with options contained in `opts' and name `name.' The optional
827 * lock type `type' is used as a general lock category name for use with
828 * witness.
829 */
830void
831mtx_init(struct mtx *m, const char *name, const char *type, int opts)
832{
Deleted:
 833 struct lock_object *lock;
Added:
 833 struct lock_class *class;
 834 int flags;
834
835 MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
836 MTX_NOWITNESS | MTX_DUPOK)) == 0);
837
838#ifdef MUTEX_DEBUG
839 /* Diagnostic and error correction */
840 mtx_validate(m);
841#endif
842
843 lock = &m->mtx_object;
844 KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
845 ("mutex \"%s\" %p already initialized", name, m));
846 bzero(m, sizeof(*m));
844 /* Determine lock class and lock flags. */
847 if (opts & MTX_SPIN)
845 if (opts & MTX_SPIN)
848 lock->lo_flags = LOCK_CLASS_SPIN_MUTEX << LO_CLASSSHIFT;
846 class = &lock_class_mtx_spin;
849 else
847 else
850 lock->lo_flags = LOCK_CLASS_SLEEP_MUTEX << LO_CLASSSHIFT;
851 lock->lo_name = name;
852 lock->lo_type = type != NULL ? type : name;
848 class = &lock_class_mtx_sleep;
849 flags = 0;
853 if (opts & MTX_QUIET)
850 if (opts & MTX_QUIET)
854 lock->lo_flags |= LO_QUIET;
851 flags |= LO_QUIET;
855 if (opts & MTX_RECURSE)
852 if (opts & MTX_RECURSE)
856 lock->lo_flags |= LO_RECURSABLE;
853 flags |= LO_RECURSABLE;
857 if ((opts & MTX_NOWITNESS) == 0)
854 if ((opts & MTX_NOWITNESS) == 0)
858 lock->lo_flags |= LO_WITNESS;
855 flags |= LO_WITNESS;
859 if (opts & MTX_DUPOK)
856 if (opts & MTX_DUPOK)
860 lock->lo_flags |= LO_DUPOK;
857 flags |= LO_DUPOK;
861
858
859 /* Initialize mutex. */
862 m->mtx_lock = MTX_UNOWNED;
860 m->mtx_lock = MTX_UNOWNED;
861 m->mtx_recurse = 0;
862#ifdef MUTEX_PROFILING
863 m->mtx_acqtime = 0;
864 m->mtx_filename = NULL;
865 m->mtx_lineno = 0;
866 m->mtx_contest_holding = 0;
867 m->mtx_contest_locking = 0;
868#endif
863
869
864 LOCK_LOG_INIT(lock, opts);
865
866 WITNESS_INIT(lock);
870 lock_init(&m->mtx_object, class, name, type, flags);
867}
868
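For reference, a typical caller of the mtx_init()/mtx_destroy() pair shown above (a sketch in the style of the mutex_init() calls later in this file; the lock and its names are hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_mtx; /* hypothetical subsystem lock */

static void
example_attach(void)
{
	/* A default (sleep) mutex; MTX_SPIN would select a spin mutex. */
	mtx_init(&example_mtx, "example lock", NULL, MTX_DEF);
}

static void
example_detach(void)
{
	mtx_destroy(&example_mtx);
}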
869/*
870 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be
871 * passed in as a flag here because if the corresponding mtx_init() was
872 * called with MTX_QUIET set, then it will already be set in the mutex's
873 * flags.
874 */
875void
876mtx_destroy(struct mtx *m)
877{
878
Deleted:
 879 LOCK_LOG_DESTROY(&m->mtx_object, 0);
 880
881 if (!mtx_owned(m))
882 MPASS(mtx_unowned(m));
883 else {
884 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
885
886 /* Perform the non-mtx related part of mtx_unlock_spin(). */
Deleted:
 887  if (LO_CLASSINDEX(&m->mtx_object) == LOCK_CLASS_SPIN_MUTEX)
Added:
 889  if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
888 spinlock_exit();
889
890 /* Tell witness this isn't locked to make it happy. */
891 WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
892 __LINE__);
893 }
894
Deleted:
 895 WITNESS_DESTROY(&m->mtx_object);
Added:
 897 lock_destroy(&m->mtx_object);
896}
897
898/*
 899 * Initialize the mutex code and system mutexes. This is called from the MD
900 * startup code prior to mi_startup(). The per-CPU data space needs to be
901 * setup before this is called.
902 */
903void
904mutex_init(void)
905{
906
907 /* Setup turnstiles so that sleep mutexes work. */
908 init_turnstiles();
909
910 /*
911 * Initialize mutexes.
912 */
913 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
914 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
915 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
916 mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
917 mtx_lock(&Giant);
918}
919
Deleted:
 920#if LOCK_DEBUG > 0 || defined(DDB)
 921/* XXX: This is not mutex-specific. */
 922struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
 923 &lock_class_mtx_spin,
 924 &lock_class_mtx_sleep,
 925 &lock_class_sx,
 926};
 927#endif
 928
929#ifdef DDB
Deleted:
 930/* XXX: This function is not mutex-specific. */
 931DB_SHOW_COMMAND(lock, db_show_lock)
 932{
 933 struct lock_object *lock;
 934 struct lock_class *class;
 935
 936 if (!have_addr)
 937  return;
 938 lock = (struct lock_object *)addr;
 939 if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
 940  db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
 941  return;
 942 }
 943 class = LOCK_CLASS(lock);
 944 db_printf(" class: %s\n", class->lc_name);
 945 db_printf(" name: %s\n", lock->lo_name);
 946 if (lock->lo_type && lock->lo_type != lock->lo_name)
 947  db_printf(" type: %s\n", lock->lo_type);
 948 class->lc_ddb_show(lock);
 949}
 950
951void
952db_show_mtx(struct lock_object *lock)
953{
954 struct thread *td;
955 struct mtx *m;
956
957 m = (struct mtx *)lock;
958
959 db_printf(" flags: {");
Deleted:
 960 if (LO_CLASSINDEX(lock) == LOCK_CLASS_SPIN_MUTEX)
Added:
 932 if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
961 db_printf("SPIN");
962 else
963 db_printf("DEF");
964 if (m->mtx_object.lo_flags & LO_RECURSABLE)
965 db_printf(", RECURSE");
966 if (m->mtx_object.lo_flags & LO_DUPOK)
967 db_printf(", DUPOK");
968 db_printf("}\n");
969 db_printf(" state: {");
970 if (mtx_unowned(m))
971 db_printf("UNOWNED");
972 else {
973 db_printf("OWNED");
974 if (m->mtx_lock & MTX_CONTESTED)
975 db_printf(", CONTESTED");
976 if (m->mtx_lock & MTX_RECURSED)
977 db_printf(", RECURSED");
978 }
979 db_printf("}\n");
980 if (!mtx_unowned(m)) {
981 td = mtx_owner(m);
982 db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
983 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
984 if (mtx_recursed(m))
985 db_printf(" recursed: %d\n", m->mtx_recurse);
986 }
987}
988#endif