kern_mutex.c (112513) vs. kern_mutex.c (113339)
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 * $FreeBSD: head/sys/kern/kern_mutex.c 112513 2003-03-23 11:26:11Z tjr $
30 * $FreeBSD: head/sys/kern/kern_mutex.c 113339 2003-04-10 17:35:44Z julian $
31 */
32
33/*
34 * Machine independent bits of mutex implementation.
35 */
36
37#include "opt_adaptive_mutexes.h"
38#include "opt_ddb.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/bus.h>
43#include <sys/kernel.h>
44#include <sys/ktr.h>
45#include <sys/lock.h>
46#include <sys/malloc.h>
47#include <sys/mutex.h>
48#include <sys/proc.h>
49#include <sys/resourcevar.h>
50#include <sys/sched.h>
51#include <sys/sbuf.h>
52#include <sys/sysctl.h>
53#include <sys/vmmeter.h>
54
55#include <machine/atomic.h>
56#include <machine/bus.h>
57#include <machine/clock.h>
58#include <machine/cpu.h>
59
60#include <ddb/ddb.h>
61
62#include <vm/vm.h>
63#include <vm/vm_extern.h>
64
65/*
66 * Internal utility macros.
67 */
68#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
69
70#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
71 : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
72
73/* XXXKSE This test will change. */
74#define thread_running(td) \
75 ((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
75 (td->td_state == TDS_RUNNING)
76 /* ((td)->td_oncpu != NOCPU) */
76
77/*
78 * Lock classes for sleep and spin mutexes.
79 */
80struct lock_class lock_class_mtx_sleep = {
81 "sleep mutex",
82 LC_SLEEPLOCK | LC_RECURSABLE
83};
84struct lock_class lock_class_mtx_spin = {
85 "spin mutex",
86 LC_SPINLOCK | LC_RECURSABLE
87};
88
89/*
90 * System-wide mutexes
91 */
92struct mtx sched_lock;
93struct mtx Giant;
94
95/*
96 * Prototypes for non-exported routines.
97 */
98static void propagate_priority(struct thread *);
99
100static void
101propagate_priority(struct thread *td)
102{
103 int pri = td->td_priority;
104 struct mtx *m = td->td_blocked;
105
106 mtx_assert(&sched_lock, MA_OWNED);
107 for (;;) {
108 struct thread *td1;
109
110 td = mtx_owner(m);
111
112 if (td == NULL) {
113 /*
114 * This really isn't quite right. Really
115 * ought to bump priority of thread that
116 * next acquires the mutex.
117 */
118 MPASS(m->mtx_lock == MTX_CONTESTED);
119 return;
120 }
121
122 MPASS(td->td_proc != NULL);
123 MPASS(td->td_proc->p_magic == P_MAGIC);
124 KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
125 if (td->td_priority <= pri) /* lower is higher priority */
126 return;
127
128
129 /*
130 * If lock holder is actually running, just bump priority.
131 */
132 if (TD_IS_RUNNING(td)) {
133 td->td_priority = pri;
134 return;
135 }
136
137#ifndef SMP
138 /*
 139	 * For UP, we check to see if td is curthread (this should never
 140	 * happen, however, as it would mean we are deadlocked).
141 */
142 KASSERT(td != curthread, ("Deadlock detected"));
143#endif
144
145 /*
146 * If on run queue move to new run queue, and quit.
147 * XXXKSE this gets a lot more complicated under threads
148 * but try anyhow.
149 */
150 if (TD_ON_RUNQ(td)) {
151 MPASS(td->td_blocked == NULL);
152 sched_prio(td, pri);
153 return;
154 }
155 /*
156 * Adjust for any other cases.
157 */
158 td->td_priority = pri;
159
160 /*
161 * If we aren't blocked on a mutex, we should be.
162 */
163 KASSERT(TD_ON_LOCK(td), (
164 "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
165 td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
166 m->mtx_object.lo_name));
167
168 /*
169 * Pick up the mutex that td is blocked on.
170 */
171 m = td->td_blocked;
172 MPASS(m != NULL);
173
174 /*
175 * Check if the thread needs to be moved up on
176 * the blocked chain
177 */
178 if (td == TAILQ_FIRST(&m->mtx_blocked)) {
179 continue;
180 }
181
182 td1 = TAILQ_PREV(td, threadqueue, td_lockq);
183 if (td1->td_priority <= pri) {
184 continue;
185 }
186
187 /*
188 * Remove thread from blocked chain and determine where
189 * it should be moved up to. Since we know that td1 has
190 * a lower priority than td, we know that at least one
191 * thread in the chain has a lower priority and that
192 * td1 will thus not be NULL after the loop.
193 */
194 TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
195 TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
196 MPASS(td1->td_proc->p_magic == P_MAGIC);
197 if (td1->td_priority > pri)
198 break;
199 }
200
201 MPASS(td1 != NULL);
202 TAILQ_INSERT_BEFORE(td1, td, td_lockq);
203 CTR4(KTR_LOCK,
204 "propagate_priority: p %p moved before %p on [%p] %s",
205 td, td1, m, m->mtx_object.lo_name);
206 }
207}
208
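/*
 * Illustrative sketch (not part of this revision; all names below are
 * hypothetical): the classic priority-inversion scenario that
 * propagate_priority() above exists to resolve.
 */
#if 0
static struct mtx res_mtx;	/* protects some shared resource */
MTX_SYSINIT(res, &res_mtx, "resource lock", MTX_DEF);

static void
low_prio_worker(void *arg)
{
	mtx_lock(&res_mtx);	/* low-priority thread takes the lock... */
	/* ...and is then preempted by a medium-priority CPU hog. */
	mtx_unlock(&res_mtx);
}

static void
high_prio_worker(void *arg)
{
	/*
	 * Blocks here; propagate_priority() lends this thread's priority
	 * to the low-priority holder, so the medium-priority thread can
	 * no longer keep the holder (and hence this thread) off the CPU.
	 */
	mtx_lock(&res_mtx);
	mtx_unlock(&res_mtx);
}
#endif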
209#ifdef MUTEX_PROFILING
210SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
211SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
212static int mutex_prof_enable = 0;
213SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
214 &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
215
216struct mutex_prof {
217 const char *name;
218 const char *file;
219 int line;
220 uintmax_t cnt_max;
221 uintmax_t cnt_tot;
222 uintmax_t cnt_cur;
223 struct mutex_prof *next;
224};
225
226/*
227 * mprof_buf is a static pool of profiling records to avoid possible
228 * reentrance of the memory allocation functions.
229 *
230 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
231 */
232#define NUM_MPROF_BUFFERS 1000
233static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
234static int first_free_mprof_buf;
235#define MPROF_HASH_SIZE 1009
236static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
237/* SWAG: sbuf size = avg stat. line size * number of locks */
238#define MPROF_SBUF_SIZE		(256 * 400)
239
240static int mutex_prof_acquisitions;
241SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
 242	 &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
243static int mutex_prof_records;
244SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
245 &mutex_prof_records, 0, "Number of profiling records");
246static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
247SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
248 &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
249static int mutex_prof_rejected;
250SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
251 &mutex_prof_rejected, 0, "Number of rejected profiling records");
252static int mutex_prof_hashsize = MPROF_HASH_SIZE;
253SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
254 &mutex_prof_hashsize, 0, "Hash size");
255static int mutex_prof_collisions = 0;
256SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
257 &mutex_prof_collisions, 0, "Number of hash collisions");
258
259/*
260 * mprof_mtx protects the profiling buffers and the hash.
261 */
262static struct mtx mprof_mtx;
263MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
264
265static u_int64_t
266nanoseconds(void)
267{
268 struct timespec tv;
269
270 nanotime(&tv);
271 return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
272}
273
274static int
275dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
276{
277 struct sbuf *sb;
278 int error, i;
279 static int multiplier = 1;
280
281 if (first_free_mprof_buf == 0)
282 return (SYSCTL_OUT(req, "No locking recorded",
283 sizeof("No locking recorded")));
284
285retry_sbufops:
286 sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
287 sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
288 "max", "total", "count", "avg", "name");
289 /*
290 * XXX this spinlock seems to be by far the largest perpetrator
291 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
292 * even before I pessimized it further by moving the average
293 * computation here).
294 */
295 mtx_lock_spin(&mprof_mtx);
296 for (i = 0; i < first_free_mprof_buf; ++i) {
297 sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
298 mprof_buf[i].cnt_max / 1000,
299 mprof_buf[i].cnt_tot / 1000,
300 mprof_buf[i].cnt_cur,
301 mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
302 mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
303 mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
304 if (sbuf_overflowed(sb)) {
305 mtx_unlock_spin(&mprof_mtx);
306 sbuf_delete(sb);
307 multiplier++;
308 goto retry_sbufops;
309 }
310 }
311 mtx_unlock_spin(&mprof_mtx);
312 sbuf_finish(sb);
313 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
314 sbuf_delete(sb);
315 return (error);
316}
317SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
318 NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
319#endif
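/*
 * Hedged userland sketch (not part of this file): reading the profiling
 * sysctls declared above, assuming a kernel built with MUTEX_PROFILING.
 * The sysctl names come straight from the SYSCTL_NODE/SYSCTL_PROC
 * declarations in this block.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *buf;
	size_t len;
	int one = 1;

	/* Turn on hold-time tracing. */
	if (sysctlbyname("debug.mutex.prof.enable", NULL, NULL,
	    &one, sizeof(one)) != 0)
		err(1, "debug.mutex.prof.enable");

	/* Size, allocate, and fetch the formatted statistics. */
	if (sysctlbyname("debug.mutex.prof.stats", NULL, &len, NULL, 0) != 0)
		err(1, "debug.mutex.prof.stats");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("debug.mutex.prof.stats", buf, &len, NULL, 0) != 0)
		err(1, "debug.mutex.prof.stats");
	printf("%s", buf);
	free(buf);
	return (0);
}
#endif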
320
321/*
322 * Function versions of the inlined __mtx_* macros. These are used by
323 * modules and can also be called from assembly language if needed.
324 */
325void
326_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
327{
328
329 MPASS(curthread != NULL);
330 KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
331 ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
332 file, line));
333 _get_sleep_lock(m, curthread, opts, file, line);
334 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
335 line);
336 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
337#ifdef MUTEX_PROFILING
338 /* don't reset the timer when/if recursing */
339 if (m->mtx_acqtime == 0) {
340 m->mtx_filename = file;
341 m->mtx_lineno = line;
342 m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
343 ++mutex_prof_acquisitions;
344 }
345#endif
346}
347
348void
349_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
350{
351
352 MPASS(curthread != NULL);
353 KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
354 ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
355 file, line));
356 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
357 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
358 line);
359 mtx_assert(m, MA_OWNED);
360#ifdef MUTEX_PROFILING
361 if (m->mtx_acqtime != 0) {
362 static const char *unknown = "(unknown)";
363 struct mutex_prof *mpp;
364 u_int64_t acqtime, now;
365 const char *p, *q;
366 volatile u_int hash;
367
368 now = nanoseconds();
369 acqtime = m->mtx_acqtime;
370 m->mtx_acqtime = 0;
371 if (now <= acqtime)
372 goto out;
373 for (p = m->mtx_filename;
374 p != NULL && strncmp(p, "../", 3) == 0; p += 3)
375 /* nothing */ ;
376 if (p == NULL || *p == '\0')
377 p = unknown;
378 for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
379 hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
380 mtx_lock_spin(&mprof_mtx);
381 for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
382 if (mpp->line == m->mtx_lineno &&
383 strcmp(mpp->file, p) == 0)
384 break;
385 if (mpp == NULL) {
386 /* Just exit if we cannot get a trace buffer */
387 if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
388 ++mutex_prof_rejected;
389 goto unlock;
390 }
391 mpp = &mprof_buf[first_free_mprof_buf++];
392 mpp->name = mtx_name(m);
393 mpp->file = p;
394 mpp->line = m->mtx_lineno;
395 mpp->next = mprof_hash[hash];
396 if (mprof_hash[hash] != NULL)
397 ++mutex_prof_collisions;
398 mprof_hash[hash] = mpp;
399 ++mutex_prof_records;
400 }
401 /*
402 * Record if the mutex has been held longer now than ever
403 * before.
404 */
405 if (now - acqtime > mpp->cnt_max)
406 mpp->cnt_max = now - acqtime;
407 mpp->cnt_tot += now - acqtime;
408 mpp->cnt_cur++;
409unlock:
410 mtx_unlock_spin(&mprof_mtx);
411 }
412out:
413#endif
414 _rel_sleep_lock(m, curthread, opts, file, line);
415}
416
417void
418_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
419{
420
421 MPASS(curthread != NULL);
422 KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
423 ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
424 m->mtx_object.lo_name, file, line));
425#if defined(SMP) || LOCK_DEBUG > 0 || 1
426 _get_spin_lock(m, curthread, opts, file, line);
427#else
428 critical_enter();
429#endif
430 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
431 line);
432 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
433}
434
435void
436_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
437{
438
439 MPASS(curthread != NULL);
440 KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
441 ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
442 m->mtx_object.lo_name, file, line));
443 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
444 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
445 line);
446 mtx_assert(m, MA_OWNED);
447#if defined(SMP) || LOCK_DEBUG > 0 || 1
448 _rel_spin_lock(m);
449#else
450 critical_exit();
451#endif
452}
453
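/*
 * Illustrative sketch (hypothetical names): the usual pattern for the
 * spin variants above, protecting state shared with code that must not
 * sleep.  Spin mutexes execute with the CPU in a critical section, so
 * the hold time should be kept short.
 */
#if 0
static struct mtx intr_state_mtx;
static int pending_events;
MTX_SYSINIT(intr_state, &intr_state_mtx, "intr state", MTX_SPIN);

static void
record_event(void)
{
	mtx_lock_spin(&intr_state_mtx);
	pending_events++;
	mtx_unlock_spin(&intr_state_mtx);
}
#endif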
454/*
 455 * The important part of mtx_trylock{,_flags}():
 456 * tries to acquire lock `m.'  We do NOT handle recursion here.  If this
457 * function is called on a recursed mutex, it will return failure and
458 * will not recursively acquire the lock. You are expected to know what
459 * you are doing.
460 */
461int
462_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
463{
464 int rval;
465
466 MPASS(curthread != NULL);
467
468 rval = _obtain_lock(m, curthread);
469
470 LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
471 if (rval)
472 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
473 file, line);
474
475 return (rval);
476}
477
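/*
 * Illustrative sketch (hypothetical names): a typical caller of
 * mtx_trylock(), falling back to deferred work instead of sleeping
 * when the lock is contended.
 */
#if 0
static struct mtx cache_mtx;
static int cache_dirty;
MTX_SYSINIT(cache, &cache_mtx, "cache lock", MTX_DEF);

static void
try_flush_cache(void)
{
	if (!mtx_trylock(&cache_mtx)) {
		cache_dirty = 1;	/* contended: leave it for later */
		return;
	}
	/* ... flush the cache ... */
	mtx_unlock(&cache_mtx);
}
#endif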
478/*
479 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
480 *
481 * We call this if the lock is either contested (i.e. we need to go to
482 * sleep waiting for it), or if we need to recurse on it.
483 */
484void
485_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
486{
487 struct thread *td = curthread;
488 struct thread *td1;
489#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
490 struct thread *owner;
491#endif
492 uintptr_t v;
493#ifdef KTR
494 int cont_logged = 0;
495#endif
496
497 if (mtx_owned(m)) {
498 m->mtx_recurse++;
499 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
500 if (LOCK_LOG_TEST(&m->mtx_object, opts))
501 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
502 return;
503 }
504
505 if (LOCK_LOG_TEST(&m->mtx_object, opts))
506 CTR4(KTR_LOCK,
507 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
508 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
509
510 while (!_obtain_lock(m, td)) {
511
512 mtx_lock_spin(&sched_lock);
513 v = m->mtx_lock;
514
515 /*
516 * Check if the lock has been released while spinning for
517 * the sched_lock.
518 */
519 if (v == MTX_UNOWNED) {
520 mtx_unlock_spin(&sched_lock);
521#ifdef __i386__
522 ia32_pause();
523#endif
524 continue;
525 }
526
527 /*
528 * The mutex was marked contested on release. This means that
529 * there are threads blocked on it.
530 */
531 if (v == MTX_CONTESTED) {
532 td1 = TAILQ_FIRST(&m->mtx_blocked);
533 MPASS(td1 != NULL);
534 m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
535
536 if (td1->td_priority < td->td_priority)
537 td->td_priority = td1->td_priority;
538 mtx_unlock_spin(&sched_lock);
539 return;
540 }
541
542 /*
543 * If the mutex isn't already contested and a failure occurs
544 * setting the contested bit, the mutex was either released
545 * or the state of the MTX_RECURSED bit changed.
546 */
547 if ((v & MTX_CONTESTED) == 0 &&
548 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
549 (void *)(v | MTX_CONTESTED))) {
550 mtx_unlock_spin(&sched_lock);
551#ifdef __i386__
552 ia32_pause();
553#endif
554 continue;
555 }
556
557#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
558 /*
559 * If the current owner of the lock is executing on another
560 * CPU, spin instead of blocking.
561 */
562 owner = (struct thread *)(v & MTX_FLAGMASK);
563 if (m != &Giant && thread_running(owner)) {
564 mtx_unlock_spin(&sched_lock);
565 while (mtx_owner(m) == owner && thread_running(owner)) {
566#ifdef __i386__
567 ia32_pause();
568#endif
569 }
570 continue;
571 }
572#endif /* SMP && ADAPTIVE_MUTEXES */
573
574 /*
575 * We definitely must sleep for this lock.
576 */
577 mtx_assert(m, MA_NOTOWNED);
578
579#ifdef notyet
580 /*
581 * If we're borrowing an interrupted thread's VM context, we
582 * must clean up before going to sleep.
583 */
584 if (td->td_ithd != NULL) {
585 struct ithd *it = td->td_ithd;
586
587 if (it->it_interrupted) {
588 if (LOCK_LOG_TEST(&m->mtx_object, opts))
589 CTR2(KTR_LOCK,
590 "_mtx_lock_sleep: %p interrupted %p",
591 it, it->it_interrupted);
592 intr_thd_fixup(it);
593 }
594 }
595#endif
596
597 /*
598 * Put us on the list of threads blocked on this mutex.
599 */
600 if (TAILQ_EMPTY(&m->mtx_blocked)) {
601 td1 = mtx_owner(m);
602 LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
603 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
604 } else {
605 TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
606 if (td1->td_priority > td->td_priority)
607 break;
608 if (td1)
609 TAILQ_INSERT_BEFORE(td1, td, td_lockq);
610 else
611 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
612 }
613#ifdef KTR
614 if (!cont_logged) {
615 CTR6(KTR_CONTENTION,
616 "contention: %p at %s:%d wants %s, taken by %s:%d",
617 td, file, line, m->mtx_object.lo_name,
618 WITNESS_FILE(&m->mtx_object),
619 WITNESS_LINE(&m->mtx_object));
620 cont_logged = 1;
621 }
622#endif
623
624 /*
625 * Save who we're blocked on.
626 */
627 td->td_blocked = m;
628 td->td_lockname = m->mtx_object.lo_name;
629 TD_SET_LOCK(td);
630 propagate_priority(td);
631
632 if (LOCK_LOG_TEST(&m->mtx_object, opts))
633 CTR3(KTR_LOCK,
634 "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
635 m->mtx_object.lo_name);
636
637 td->td_proc->p_stats->p_ru.ru_nvcsw++;
638 mi_switch();
639
640 if (LOCK_LOG_TEST(&m->mtx_object, opts))
641 CTR3(KTR_LOCK,
642 "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
643 td, m, m->mtx_object.lo_name);
644
645 mtx_unlock_spin(&sched_lock);
646 }
647
648#ifdef KTR
649 if (cont_logged) {
650 CTR4(KTR_CONTENTION,
651 "contention end: %s acquired by %p at %s:%d",
652 m->mtx_object.lo_name, td, file, line);
653 }
654#endif
655 return;
656}
657
658/*
659 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
660 *
661 * This is only called if we need to actually spin for the lock. Recursion
662 * is handled inline.
663 */
664void
665_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
666{
667 int i = 0;
668
669 if (LOCK_LOG_TEST(&m->mtx_object, opts))
670 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
671
672 for (;;) {
673 if (_obtain_lock(m, curthread))
674 break;
675
676 /* Give interrupts a chance while we spin. */
677 critical_exit();
678 while (m->mtx_lock != MTX_UNOWNED) {
679 if (i++ < 10000000) {
680#ifdef __i386__
681 ia32_pause();
682#endif
683 continue;
684 }
685 if (i < 60000000)
686 DELAY(1);
687#ifdef DDB
688 else if (!db_active)
689#else
690 else
691#endif
692 panic("spin lock %s held by %p for > 5 seconds",
693 m->mtx_object.lo_name, (void *)m->mtx_lock);
694#ifdef __i386__
695 ia32_pause();
696#endif
697 }
698 critical_enter();
699 }
700
701 if (LOCK_LOG_TEST(&m->mtx_object, opts))
702 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
703
704 return;
705}
706
707/*
708 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
709 *
710 * We are only called here if the lock is recursed or contested (i.e. we
711 * need to wake up a blocked thread).
712 */
713void
714_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
715{
716 struct thread *td, *td1;
717 struct mtx *m1;
718 int pri;
719
720 td = curthread;
721
722 if (mtx_recursed(m)) {
723 if (--(m->mtx_recurse) == 0)
724 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
725 if (LOCK_LOG_TEST(&m->mtx_object, opts))
726 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
727 return;
728 }
729
730 mtx_lock_spin(&sched_lock);
731 if (LOCK_LOG_TEST(&m->mtx_object, opts))
732 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
733
734 td1 = TAILQ_FIRST(&m->mtx_blocked);
735#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
736 if (td1 == NULL) {
737 _release_lock_quick(m);
738 if (LOCK_LOG_TEST(&m->mtx_object, opts))
739 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
740 mtx_unlock_spin(&sched_lock);
741 return;
742 }
743#endif
744 MPASS(td->td_proc->p_magic == P_MAGIC);
745 MPASS(td1->td_proc->p_magic == P_MAGIC);
746
747 TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);
748
749 if (TAILQ_EMPTY(&m->mtx_blocked)) {
750 LIST_REMOVE(m, mtx_contested);
751 _release_lock_quick(m);
752 if (LOCK_LOG_TEST(&m->mtx_object, opts))
753 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
754 } else
755 atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
756
757 pri = PRI_MAX;
758 LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
759 int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
760 if (cp < pri)
761 pri = cp;
762 }
763
764 if (pri > td->td_base_pri)
765 pri = td->td_base_pri;
766 td->td_priority = pri;
767
768 if (LOCK_LOG_TEST(&m->mtx_object, opts))
769 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
770 m, td1);
771
772 td1->td_blocked = NULL;
773 TD_CLR_LOCK(td1);
774 if (!TD_CAN_RUN(td1)) {
775 mtx_unlock_spin(&sched_lock);
776 return;
777 }
778 setrunqueue(td1);
779
780 if (td->td_critnest == 1 && td1->td_priority < pri) {
781#ifdef notyet
782 if (td->td_ithd != NULL) {
783 struct ithd *it = td->td_ithd;
784
785 if (it->it_interrupted) {
786 if (LOCK_LOG_TEST(&m->mtx_object, opts))
787 CTR2(KTR_LOCK,
788 "_mtx_unlock_sleep: %p interrupted %p",
789 it, it->it_interrupted);
790 intr_thd_fixup(it);
791 }
792 }
793#endif
794 if (LOCK_LOG_TEST(&m->mtx_object, opts))
795 CTR2(KTR_LOCK,
796 "_mtx_unlock_sleep: %p switching out lock=%p", m,
797 (void *)m->mtx_lock);
798
799 td->td_proc->p_stats->p_ru.ru_nivcsw++;
800 mi_switch();
801 if (LOCK_LOG_TEST(&m->mtx_object, opts))
802 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
803 m, (void *)m->mtx_lock);
804 }
805
806 mtx_unlock_spin(&sched_lock);
807
808 return;
809}
810
811/*
812 * All the unlocking of MTX_SPIN locks is done inline.
813 * See the _rel_spin_lock() macro for the details.
814 */
815
816/*
817 * The backing function for the INVARIANTS-enabled mtx_assert()
818 */
819#ifdef INVARIANT_SUPPORT
820void
821_mtx_assert(struct mtx *m, int what, const char *file, int line)
822{
823
824 if (panicstr != NULL)
825 return;
826 switch (what) {
827 case MA_OWNED:
828 case MA_OWNED | MA_RECURSED:
829 case MA_OWNED | MA_NOTRECURSED:
830 if (!mtx_owned(m))
831 panic("mutex %s not owned at %s:%d",
832 m->mtx_object.lo_name, file, line);
833 if (mtx_recursed(m)) {
834 if ((what & MA_NOTRECURSED) != 0)
835 panic("mutex %s recursed at %s:%d",
836 m->mtx_object.lo_name, file, line);
837 } else if ((what & MA_RECURSED) != 0) {
838 panic("mutex %s unrecursed at %s:%d",
839 m->mtx_object.lo_name, file, line);
840 }
841 break;
842 case MA_NOTOWNED:
843 if (mtx_owned(m))
844 panic("mutex %s owned at %s:%d",
845 m->mtx_object.lo_name, file, line);
846 break;
847 default:
848 panic("unknown mtx_assert at %s:%d", file, line);
849 }
850}
851#endif
852
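/*
 * Illustrative sketch (hypothetical names): mtx_assert() documenting
 * and, under INVARIANTS, enforcing a function's locking contract.
 */
#if 0
struct widget {
	struct mtx	w_mtx;
	int		w_count;
};

static void
widget_update(struct widget *w)
{
	mtx_assert(&w->w_mtx, MA_OWNED);	/* caller must hold the lock */
	w->w_count++;
}
#endif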
853/*
854 * The MUTEX_DEBUG-enabled mtx_validate()
855 *
856 * Most of these checks have been moved off into the LO_INITIALIZED flag
857 * maintained by the witness code.
858 */
859#ifdef MUTEX_DEBUG
860
861void mtx_validate(struct mtx *);
862
863void
864mtx_validate(struct mtx *m)
865{
866
867/*
 868	 * XXX: When kernacc() does not require Giant we can re-enable this check
869 */
870#ifdef notyet
871/*
872 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
873 * we can re-enable the kernacc() checks.
874 */
875#ifndef __alpha__
876 /*
877 * Can't call kernacc() from early init386(), especially when
878 * initializing Giant mutex, because some stuff in kernacc()
879 * requires Giant itself.
880 */
881 if (!cold)
882 if (!kernacc((caddr_t)m, sizeof(m),
883 VM_PROT_READ | VM_PROT_WRITE))
884 panic("Can't read and write to mutex %p", m);
885#endif
886#endif
887}
888#endif
889
890/*
891 * General init routine used by the MTX_SYSINIT() macro.
892 */
893void
894mtx_sysinit(void *arg)
895{
896 struct mtx_args *margs = arg;
897
898 mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
899}
900
901/*
 902	 * Mutex initialization routine; initialize lock `m' with name `name'
 903	 * and option flags `opts' (which also select the lock class).  The optional
904 * lock type `type' is used as a general lock category name for use with
905 * witness.
906 */
907void
908mtx_init(struct mtx *m, const char *name, const char *type, int opts)
909{
910 struct lock_object *lock;
911
912 MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
913 MTX_NOWITNESS | MTX_DUPOK)) == 0);
914
915#ifdef MUTEX_DEBUG
916 /* Diagnostic and error correction */
917 mtx_validate(m);
918#endif
919
920 lock = &m->mtx_object;
921 KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
922 ("mutex %s %p already initialized", name, m));
923 bzero(m, sizeof(*m));
924 if (opts & MTX_SPIN)
925 lock->lo_class = &lock_class_mtx_spin;
926 else
927 lock->lo_class = &lock_class_mtx_sleep;
928 lock->lo_name = name;
929 lock->lo_type = type != NULL ? type : name;
930 if (opts & MTX_QUIET)
931 lock->lo_flags = LO_QUIET;
932 if (opts & MTX_RECURSE)
933 lock->lo_flags |= LO_RECURSABLE;
934 if ((opts & MTX_NOWITNESS) == 0)
935 lock->lo_flags |= LO_WITNESS;
936 if (opts & MTX_DUPOK)
937 lock->lo_flags |= LO_DUPOK;
938
939 m->mtx_lock = MTX_UNOWNED;
940 TAILQ_INIT(&m->mtx_blocked);
941
942 LOCK_LOG_INIT(lock, opts);
943
944 WITNESS_INIT(lock);
945}
946
947/*
948 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be
949 * passed in as a flag here because if the corresponding mtx_init() was
950 * called with MTX_QUIET set, then it will already be set in the mutex's
951 * flags.
952 */
953void
954mtx_destroy(struct mtx *m)
955{
956
957 LOCK_LOG_DESTROY(&m->mtx_object, 0);
958
959 if (!mtx_owned(m))
960 MPASS(mtx_unowned(m));
961 else {
962 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
963
964 /* Tell witness this isn't locked to make it happy. */
965 WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
966 __LINE__);
967 }
968
969 WITNESS_DESTROY(&m->mtx_object);
970}
971
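/*
 * Illustrative sketch (hypothetical names): the full lifecycle using
 * mtx_init() and mtx_destroy() above, as a dynamically allocated
 * object would use them.
 */
#if 0
struct session {
	struct mtx	s_mtx;
	int		s_refs;
};

static void
session_setup(struct session *s)
{
	/* With a NULL type, "session" doubles as the witness lock type. */
	mtx_init(&s->s_mtx, "session", NULL, MTX_DEF);
}

static void
session_teardown(struct session *s)
{
	mtx_lock(&s->s_mtx);
	s->s_refs = 0;
	mtx_unlock(&s->s_mtx);	/* release before destroying */
	mtx_destroy(&s->s_mtx);
}
#endif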
972/*
 973	 * Initialize the mutex code and system mutexes.  This is called from the MD
 974	 * startup code prior to mi_startup().  The per-CPU data space needs to be
 975	 * set up before this is called.
976 */
977void
978mutex_init(void)
979{
980
981 /* Setup thread0 so that mutexes work. */
982 LIST_INIT(&thread0.td_contested);
983
984 /*
985 * Initialize mutexes.
986 */
987 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
988 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
989 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
990 mtx_lock(&Giant);
991}