Deleted Added
full compact
subr_turnstile.c (65856) subr_turnstile.c (67352)
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.

--- 12 unchanged lines hidden (view full) ---

21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.

--- 12 unchanged lines hidden (view full) ---

21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * $FreeBSD: head/sys/kern/subr_turnstile.c 65856 2000-09-14 20:15:16Z jhb $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 * $FreeBSD: head/sys/kern/subr_turnstile.c 67352 2000-10-20 07:26:37Z jhb $
30 */
31
32/*
33 * Main Entry: witness
34 * Pronunciation: 'wit-n&s
35 * Function: noun
36 * Etymology: Middle English witnesse, from Old English witnes knowledge,
37 * testimony, witness, from 2wit

--- 7 unchanged lines hidden (view full) ---

45 * 5 a : something serving as evidence or proof : SIGN
46 * b : public affirmation by word or example of usually
47 * religious faith or conviction <the heroic witness to divine
48 * life -- Pilot>
49 * 6 capitalized : a member of the Jehovah's Witnesses
50 */
51
52#include <sys/param.h>
31 */
32
33/*
34 * Main Entry: witness
35 * Pronunciation: 'wit-n&s
36 * Function: noun
37 * Etymology: Middle English witnesse, from Old English witnes knowledge,
38 * testimony, witness, from 2wit

--- 7 unchanged lines hidden (view full) ---

46 * 5 a : something serving as evidence or proof : SIGN
47 * b : public affirmation by word or example of usually
48 * religious faith or conviction <the heroic witness to divine
49 * life -- Pilot>
50 * 6 capitalized : a member of the Jehovah's Witnesses
51 */
52
53#include <sys/param.h>
54#include <sys/bus.h>
55#include <sys/kernel.h>
56#include <sys/malloc.h>
53#include <sys/proc.h>
54#include <sys/systm.h>
57#include <sys/proc.h>
58#include <sys/systm.h>
59#include <sys/vmmeter.h>
55#include <sys/ktr.h>
56
60#include <sys/ktr.h>
61
62#include <machine/atomic.h>
63#include <machine/bus.h>
64#include <machine/clock.h>
57#include <machine/cpu.h>
65#include <machine/cpu.h>
66
67#include <vm/vm.h>
68#include <vm/vm_extern.h>
69
58#define _KERN_MUTEX_C_ /* Cause non-inlined mtx_*() to be compiled. */
70#define _KERN_MUTEX_C_ /* Cause non-inlined mtx_*() to be compiled. */
59#include <machine/mutex.h>
71#include <sys/mutex.h>
60
61/*
72
73/*
74 * Machine independent bits of the mutex implementation
75 */
76/* All mutexes in system (used for debug/panic) */
77#ifdef MUTEX_DEBUG
78static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0,
79 "All mutexes queue head" };
80static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, &all_mtx_debug,
81 TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
82 { NULL, NULL }, &all_mtx, &all_mtx };
83#else /* MUTEX_DEBUG */
84static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
85 TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
86 { NULL, NULL }, &all_mtx, &all_mtx };
87#endif /* MUTEX_DEBUG */
88
89static int mtx_cur_cnt;
90static int mtx_max_cnt;
91
92void _mtx_enter_giant_def(void);
93void _mtx_exit_giant_def(void);
94static void propagate_priority(struct proc *) __unused;
95
96#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
97#define mtx_owner(m) (mtx_unowned(m) ? NULL \
98 : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
99
100#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
101#define SET_PRIO(p, pri) (p)->p_priority = (pri)
102
103/*
104 * XXX Temporary, for use from assembly language
105 */
106
107void
108_mtx_enter_giant_def(void)
109{
110
111 mtx_enter(&Giant, MTX_DEF);
112}
113
114void
115_mtx_exit_giant_def(void)
116{
117
118 mtx_exit(&Giant, MTX_DEF);
119}
120
121static void
122propagate_priority(struct proc *p)
123{
124 int pri = p->p_priority;
125 struct mtx *m = p->p_blocked;
126
127 for (;;) {
128 struct proc *p1;
129
130 p = mtx_owner(m);
131
132 if (p == NULL) {
133 /*
134 * This really isn't quite right. Really
135 * ought to bump priority of process that
136 * next acquires the mutex.
137 */
138 MPASS(m->mtx_lock == MTX_CONTESTED);
139 return;
140 }
141 MPASS(p->p_magic == P_MAGIC);
142 if (p->p_priority <= pri)
143 return;
144 /*
145 * If lock holder is actually running, just bump priority.
146 */
147 if (TAILQ_NEXT(p, p_procq) == NULL) {
148 MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
149 SET_PRIO(p, pri);
150 return;
151 }
152 /*
153 * If on run queue move to new run queue, and
154 * quit.
155 */
156 if (p->p_stat == SRUN) {
157 MPASS(p->p_blocked == NULL);
158 remrunqueue(p);
159 SET_PRIO(p, pri);
160 setrunqueue(p);
161 return;
162 }
163
164 /*
165 * If we aren't blocked on a mutex, give up and quit.
166 */
167 if (p->p_stat != SMTX) {
168 printf(
169 "XXX: process %d(%s):%d holds %s but isn't blocked on a mutex\n",
170 p->p_pid, p->p_comm, p->p_stat, m->mtx_description);
171 return;
172 }
173
174 /*
175 * Pick up the mutex that p is blocked on.
176 */
177 m = p->p_blocked;
178 MPASS(m != NULL);
179
180 printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
181 p->p_comm, m->mtx_description);
182 /*
183 * Check if the proc needs to be moved up on
184 * the blocked chain
185 */
186 if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
187 p1->p_priority <= pri) {
188 if (p1)
189 printf(
190 "XXX: previous process %d(%s) has higher priority\n",
191 p->p_pid, p->p_comm);
192 else
193 printf("XXX: process at head of run queue\n");
194 continue;
195 }
196
197 /*
198 * Remove proc from blocked chain
199 */
200 TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
201 TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
202 MPASS(p1->p_magic == P_MAGIC);
203 if (p1->p_priority > pri)
204 break;
205 }
206 if (p1)
207 TAILQ_INSERT_BEFORE(p1, p, p_procq);
208 else
209 TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
210 CTR4(KTR_LOCK,
211 "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
212 p, p1, m, m->mtx_description);
213 }
214}
215
216void
217mtx_enter_hard(struct mtx *m, int type, int saveintr)
218{
219 struct proc *p = CURPROC;
220 struct timeval new_switchtime;
221
222 KASSERT(p != NULL, ("curproc is NULL in mutex"));
223
224 switch (type) {
225 case MTX_DEF:
226 if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
227 m->mtx_recurse++;
228 atomic_set_ptr(&m->mtx_lock, MTX_RECURSE);
229 CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
230 return;
231 }
232 CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%p) [0x%p]",
233 m, m->mtx_lock, RETIP(m));
234 while (!_obtain_lock(m, p)) {
235 int v;
236 struct proc *p1;
237
238 mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
239 /*
240 * check if the lock has been released while
241 * waiting for the schedlock.
242 */
243 if ((v = m->mtx_lock) == MTX_UNOWNED) {
244 mtx_exit(&sched_lock, MTX_SPIN);
245 continue;
246 }
247 /*
248 * The mutex was marked contested on release. This
249 * means that there are processes blocked on it.
250 */
251 if (v == MTX_CONTESTED) {
252 p1 = TAILQ_FIRST(&m->mtx_blocked);
253 KASSERT(p1 != NULL, ("contested mutex has no contesters"));
254 KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
255 m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
256 if (p1->p_priority < p->p_priority) {
257 SET_PRIO(p, p1->p_priority);
258 }
259 mtx_exit(&sched_lock, MTX_SPIN);
260 return;
261 }
262 /*
263 * If the mutex isn't already contested and
264 * a failure occurs setting the contested bit the
265 * mutex was either release or the
266 * state of the RECURSION bit changed.
267 */
268 if ((v & MTX_CONTESTED) == 0 &&
269 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
270 (void *)(v | MTX_CONTESTED))) {
271 mtx_exit(&sched_lock, MTX_SPIN);
272 continue;
273 }
274
275 /* We definitely have to sleep for this lock */
276 mtx_assert(m, MA_NOTOWNED);
277
278#ifdef notyet
279 /*
280 * If we're borrowing an interrupted thread's VM
281 * context must clean up before going to sleep.
282 */
283 if (p->p_flag & (P_ITHD | P_SITHD)) {
284 ithd_t *it = (ithd_t *)p;
285
286 if (it->it_interrupted) {
287 CTR2(KTR_LOCK,
288 "mtx_enter: 0x%x interrupted 0x%x",
289 it, it->it_interrupted);
290 intr_thd_fixup(it);
291 }
292 }
293#endif
294
295 /* Put us on the list of procs blocked on this mutex */
296 if (TAILQ_EMPTY(&m->mtx_blocked)) {
297 p1 = (struct proc *)(m->mtx_lock &
298 MTX_FLAGMASK);
299 LIST_INSERT_HEAD(&p1->p_contested, m,
300 mtx_contested);
301 TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
302 } else {
303 TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
304 if (p1->p_priority > p->p_priority)
305 break;
306 if (p1)
307 TAILQ_INSERT_BEFORE(p1, p, p_procq);
308 else
309 TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
310 p_procq);
311 }
312
313 p->p_blocked = m; /* Who we're blocked on */
314 p->p_stat = SMTX;
315#if 0
316 propagate_priority(p);
317#endif
318 CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
319 p, m, m->mtx_description);
320 /*
321 * Blatantly copied from mi_switch nearly verbatim.
322 * When Giant goes away and we stop dinking with it
323 * in mi_switch, we can go back to calling mi_switch
324 * directly here.
325 */
326
327 /*
328 * Compute the amount of time during which the current
329 * process was running, and add that to its total so
330 * far.
331 */
332 microuptime(&new_switchtime);
333 if (timevalcmp(&new_switchtime, &switchtime, <)) {
334 printf(
335 "microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
336 switchtime.tv_sec, switchtime.tv_usec,
337 new_switchtime.tv_sec,
338 new_switchtime.tv_usec);
339 new_switchtime = switchtime;
340 } else {
341 p->p_runtime += (new_switchtime.tv_usec -
342 switchtime.tv_usec) +
343 (new_switchtime.tv_sec - switchtime.tv_sec) *
344 (int64_t)1000000;
345 }
346
347 /*
348 * Pick a new current process and record its start time.
349 */
350 cnt.v_swtch++;
351 switchtime = new_switchtime;
352 cpu_switch();
353 if (switchtime.tv_sec == 0)
354 microuptime(&switchtime);
355 switchticks = ticks;
356 CTR3(KTR_LOCK,
357 "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
358 p, m, m->mtx_description);
359 mtx_exit(&sched_lock, MTX_SPIN);
360 }
361 return;
362 case MTX_SPIN:
363 case MTX_SPIN | MTX_FIRST:
364 case MTX_SPIN | MTX_TOPHALF:
365 {
366 int i = 0;
367
368 if (m->mtx_lock == (uintptr_t)p) {
369 m->mtx_recurse++;
370 return;
371 }
372 CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
373 for (;;) {
374 if (_obtain_lock(m, p))
375 break;
376 while (m->mtx_lock != MTX_UNOWNED) {
377 if (i++ < 1000000)
378 continue;
379 if (i++ < 6000000)
380 DELAY (1);
381#ifdef DDB
382 else if (!db_active)
383#else
384 else
385#endif
386 panic(
387 "spin lock %s held by 0x%p for > 5 seconds",
388 m->mtx_description,
389 (void *)m->mtx_lock);
390 }
391 }
392
393#ifdef MUTEX_DEBUG
394 if (type != MTX_SPIN)
395 m->mtx_saveintr = 0xbeefface;
396 else
397#endif
398 m->mtx_saveintr = saveintr;
399 CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
400 return;
401 }
402 }
403}
404
405void
406mtx_exit_hard(struct mtx *m, int type)
407{
408 struct proc *p, *p1;
409 struct mtx *m1;
410 int pri;
411
412 p = CURPROC;
413 switch (type) {
414 case MTX_DEF:
415 case MTX_DEF | MTX_NOSWITCH:
416 if (m->mtx_recurse != 0) {
417 if (--(m->mtx_recurse) == 0)
418 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSE);
419 CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
420 return;
421 }
422 mtx_enter(&sched_lock, MTX_SPIN);
423 CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
424 p1 = TAILQ_FIRST(&m->mtx_blocked);
425 MPASS(p->p_magic == P_MAGIC);
426 MPASS(p1->p_magic == P_MAGIC);
427 TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
428 if (TAILQ_EMPTY(&m->mtx_blocked)) {
429 LIST_REMOVE(m, mtx_contested);
430 _release_lock_quick(m);
431 CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
432 } else
433 m->mtx_lock = MTX_CONTESTED;
434 pri = MAXPRI;
435 LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
436 int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
437 if (cp < pri)
438 pri = cp;
439 }
440 if (pri > p->p_nativepri)
441 pri = p->p_nativepri;
442 SET_PRIO(p, pri);
443 CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
444 m, p1);
445 p1->p_blocked = NULL;
446 p1->p_stat = SRUN;
447 setrunqueue(p1);
448 if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
449#ifdef notyet
450 if (p->p_flag & (P_ITHD | P_SITHD)) {
451 ithd_t *it = (ithd_t *)p;
452
453 if (it->it_interrupted) {
454 CTR2(KTR_LOCK,
455 "mtx_exit: 0x%x interruped 0x%x",
456 it, it->it_interrupted);
457 intr_thd_fixup(it);
458 }
459 }
460#endif
461 setrunqueue(p);
462 CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%p",
463 m, m->mtx_lock);
464 mi_switch();
465 CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%p",
466 m, m->mtx_lock);
467 }
468 mtx_exit(&sched_lock, MTX_SPIN);
469 break;
470 case MTX_SPIN:
471 case MTX_SPIN | MTX_FIRST:
472 if (m->mtx_recurse != 0) {
473 m->mtx_recurse--;
474 return;
475 }
476 MPASS(mtx_owned(m));
477 _release_lock_quick(m);
478 if (type & MTX_FIRST)
479 enable_intr(); /* XXX is this kosher? */
480 else {
481 MPASS(m->mtx_saveintr != 0xbeefface);
482 restore_intr(m->mtx_saveintr);
483 }
484 break;
485 case MTX_SPIN | MTX_TOPHALF:
486 if (m->mtx_recurse != 0) {
487 m->mtx_recurse--;
488 return;
489 }
490 MPASS(mtx_owned(m));
491 _release_lock_quick(m);
492 break;
493 default:
494 panic("mtx_exit_hard: unsupported type 0x%x\n", type);
495 }
496}
497
#define MV_DESTROY	0	/* validate before destroy */
#define MV_INIT		1	/* validate before init */

#ifdef MUTEX_DEBUG

int mtx_validate __P((struct mtx *, int));

/*
 * Walk the global all_mtx list and sanity-check it, then verify that
 * 'm' is (MV_DESTROY) or is not (MV_INIT) already on the list.
 * Returns 1 from MV_INIT when the mutex is being re-initialized,
 * 0 otherwise.  No-op for all_mtx itself or while cold.
 */
int
mtx_validate(struct mtx *m, int when)
{
	struct mtx *mp;
	int i;
	int retval = 0;

	if (m == &all_mtx || cold)
		return 0;

	mtx_enter(&all_mtx, MTX_DEF);
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	MPASS(kernacc((caddr_t)all_mtx.mtx_next, sizeof(uintptr_t),
	    VM_PROT_READ) == 1);
#endif
	MPASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
	for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
#ifndef __alpha__
		if (kernacc((caddr_t)mp->mtx_next, sizeof(uintptr_t),
		    VM_PROT_READ) != 1) {
			panic("mtx_validate: mp=%p mp->mtx_next=%p",
			    mp, mp->mtx_next);
		}
#endif
		i++;
		/* A chain longer than the known count means corruption. */
		if (i > mtx_cur_cnt) {
			panic("mtx_validate: too many in chain, known=%d\n",
			    mtx_cur_cnt);
		}
	}
	MPASS(i == mtx_cur_cnt);
	switch (when) {
	case MV_DESTROY:
		for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
			if (mp == m)
				break;
		MPASS(mp == m);
		break;
	case MV_INIT:
		for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
			if (mp == m) {
				/*
				 * Not good. This mutex already exists.
				 */
				printf("re-initing existing mutex %s\n",
				    m->mtx_description);
				MPASS(m->mtx_lock == MTX_UNOWNED);
				retval = 1;
			}
	}
	mtx_exit(&all_mtx, MTX_DEF);
	return (retval);
}
#endif
563
564void
565mtx_init(struct mtx *m, const char *t, int flag)
566{
567#ifdef MUTEX_DEBUG
568 struct mtx_debug *debug;
569#endif
570
571 CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
572#ifdef MUTEX_DEBUG
573 if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
574 return;
575 if (flag & MTX_COLD)
576 debug = m->mtx_debug;
577 else
578 debug = NULL;
579 if (debug == NULL) {
580#ifdef DIAGNOSTIC
581 if(cold && bootverbose)
582 printf("malloc'ing mtx_debug while cold for %s\n", t);
583#endif
584
585 /* XXX - should not use DEVBUF */
586 debug = malloc(sizeof(struct mtx_debug), M_DEVBUF, M_NOWAIT);
587 MPASS(debug != NULL);
588 bzero(debug, sizeof(struct mtx_debug));
589 }
590#endif
591 bzero((void *)m, sizeof *m);
592 TAILQ_INIT(&m->mtx_blocked);
593#ifdef MUTEX_DEBUG
594 m->mtx_debug = debug;
595#endif
596 m->mtx_description = t;
597 m->mtx_lock = MTX_UNOWNED;
598 /* Put on all mutex queue */
599 mtx_enter(&all_mtx, MTX_DEF);
600 m->mtx_next = &all_mtx;
601 m->mtx_prev = all_mtx.mtx_prev;
602 m->mtx_prev->mtx_next = m;
603 all_mtx.mtx_prev = m;
604 if (++mtx_cur_cnt > mtx_max_cnt)
605 mtx_max_cnt = mtx_cur_cnt;
606 mtx_exit(&all_mtx, MTX_DEF);
607 witness_init(m, flag);
608}
609
610void
611mtx_destroy(struct mtx *m)
612{
613
614 CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
615#ifdef MUTEX_DEBUG
616 if (m->mtx_next == NULL)
617 panic("mtx_destroy: %p (%s) already destroyed",
618 m, m->mtx_description);
619
620 if (!mtx_owned(m)) {
621 MPASS(m->mtx_lock == MTX_UNOWNED);
622 } else {
623 MPASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
624 }
625 mtx_validate(m, MV_DESTROY); /* diagnostic */
626#endif
627
628#ifdef WITNESS
629 if (m->mtx_witness)
630 witness_destroy(m);
631#endif /* WITNESS */
632
633 /* Remove from the all mutex queue */
634 mtx_enter(&all_mtx, MTX_DEF);
635 m->mtx_next->mtx_prev = m->mtx_prev;
636 m->mtx_prev->mtx_next = m->mtx_next;
637#ifdef MUTEX_DEBUG
638 m->mtx_next = m->mtx_prev = NULL;
639 free(m->mtx_debug, M_DEVBUF);
640 m->mtx_debug = NULL;
641#endif
642 mtx_cur_cnt--;
643 mtx_exit(&all_mtx, MTX_DEF);
644}
645
646/*
62 * The non-inlined versions of the mtx_*() functions are always built (above),
647 * The non-inlined versions of the mtx_*() functions are always built (above),
63 * but the witness code depends on the SMP_DEBUG and WITNESS kernel options
648 * but the witness code depends on the MUTEX_DEBUG and WITNESS kernel options
64 * being specified.
65 */
649 * being specified.
650 */
66#if (defined(SMP_DEBUG) && defined(WITNESS))
651#if (defined(MUTEX_DEBUG) && defined(WITNESS))
67
68#define WITNESS_COUNT 200
69#define WITNESS_NCHILDREN 2
70
71#ifndef WITNESS
72#define WITNESS 0 /* default off */
73#endif
74

--- 226 unchanged lines hidden (view full) ---

301 goto out;
302 }
303 if (isitmydescendant(m1->mtx_witness, w)) {
304 mtx_exit(&w_mtx, MTX_SPIN);
305 goto out;
306 }
307 for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
308
652
653#define WITNESS_COUNT 200
654#define WITNESS_NCHILDREN 2
655
656#ifndef WITNESS
657#define WITNESS 0 /* default off */
658#endif
659

--- 226 unchanged lines hidden (view full) ---

886 goto out;
887 }
888 if (isitmydescendant(m1->mtx_witness, w)) {
889 mtx_exit(&w_mtx, MTX_SPIN);
890 goto out;
891 }
892 for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
893
309 ASS(i < 200);
894 MPASS(i < 200);
310 w1 = m1->mtx_witness;
311 if (isitmydescendant(w, w1)) {
312 mtx_exit(&w_mtx, MTX_SPIN);
313 if (blessed(w, w1))
314 goto out;
315 if (m1 == &Giant) {
316 if (w1->w_Giant_squawked)
317 goto out;

--- 32 unchanged lines hidden (view full) ---

350 m->mtx_line = line;
351 m->mtx_file = file;
352
353 /*
354 * If this pays off it likely means that a mutex being witnessed
355 * is acquired in hardclock. Put it in the ignore list. It is
356 * likely not the mutex this assert fails on.
357 */
895 w1 = m1->mtx_witness;
896 if (isitmydescendant(w, w1)) {
897 mtx_exit(&w_mtx, MTX_SPIN);
898 if (blessed(w, w1))
899 goto out;
900 if (m1 == &Giant) {
901 if (w1->w_Giant_squawked)
902 goto out;

--- 32 unchanged lines hidden (view full) ---

935 m->mtx_line = line;
936 m->mtx_file = file;
937
938 /*
939 * If this pays off it likely means that a mutex being witnessed
940 * is acquired in hardclock. Put it in the ignore list. It is
941 * likely not the mutex this assert fails on.
942 */
358 ASS(m->mtx_held.le_prev == NULL);
943 MPASS(m->mtx_held.le_prev == NULL);
359 LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
360}
361
362void
363witness_exit(struct mtx *m, int flags, const char *file, int line)
364{
365 struct witness *w;
366

--- 50 unchanged lines hidden (view full) ---

417 if (m->mtx_recurse != 0)
418 return;
419
420 w->w_file = file;
421 w->w_line = line;
422 m->mtx_line = line;
423 m->mtx_file = file;
424 p = CURPROC;
944 LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
945}
946
947void
948witness_exit(struct mtx *m, int flags, const char *file, int line)
949{
950 struct witness *w;
951

--- 50 unchanged lines hidden (view full) ---

1002 if (m->mtx_recurse != 0)
1003 return;
1004
1005 w->w_file = file;
1006 w->w_line = line;
1007 m->mtx_line = line;
1008 m->mtx_file = file;
1009 p = CURPROC;
425 ASS(m->mtx_held.le_prev == NULL);
1010 MPASS(m->mtx_held.le_prev == NULL);
426 LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
427}
428
429void
430witness_display(void(*prnt)(const char *fmt, ...))
431{
432 struct witness *w, *w1;
433

--- 125 unchanged lines hidden (view full) ---

559 while (parent->w_morechildren)
560 parent = parent->w_morechildren;
561
562 if (parent->w_childcnt == WITNESS_NCHILDREN) {
563 if ((parent->w_morechildren = witness_get()) == NULL)
564 return (1);
565 parent = parent->w_morechildren;
566 }
1011 LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
1012}
1013
1014void
1015witness_display(void(*prnt)(const char *fmt, ...))
1016{
1017 struct witness *w, *w1;
1018

--- 125 unchanged lines hidden (view full) ---

1144 while (parent->w_morechildren)
1145 parent = parent->w_morechildren;
1146
1147 if (parent->w_childcnt == WITNESS_NCHILDREN) {
1148 if ((parent->w_morechildren = witness_get()) == NULL)
1149 return (1);
1150 parent = parent->w_morechildren;
1151 }
567 ASS(child != NULL);
1152 MPASS(child != NULL);
568 parent->w_children[parent->w_childcnt++] = child;
569 /*
570 * now prune whole tree
571 */
572 if (recursed)
573 return (0);
574 recursed = 1;
575 for (child = w_all; child != NULL; child = child->w_next) {

--- 22 unchanged lines hidden (view full) ---

598 for (i = 0; i < w->w_childcnt; i++)
599 if (w->w_children[i] == child)
600 goto found;
601 return;
602found:
603 for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
604 continue;
605 w->w_children[i] = w1->w_children[--w1->w_childcnt];
1153 parent->w_children[parent->w_childcnt++] = child;
1154 /*
1155 * now prune whole tree
1156 */
1157 if (recursed)
1158 return (0);
1159 recursed = 1;
1160 for (child = w_all; child != NULL; child = child->w_next) {

--- 22 unchanged lines hidden (view full) ---

1183 for (i = 0; i < w->w_childcnt; i++)
1184 if (w->w_children[i] == child)
1185 goto found;
1186 return;
1187found:
1188 for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
1189 continue;
1190 w->w_children[i] = w1->w_children[--w1->w_childcnt];
606 ASS(w->w_children[i] != NULL);
1191 MPASS(w->w_children[i] != NULL);
607
608 if (w1->w_childcnt != 0)
609 return;
610
611 if (w1 == parent)
612 return;
613 for (w = parent; w->w_morechildren != w1; w = w->w_morechildren)
614 continue;

--- 19 unchanged lines hidden (view full) ---

634static int
635isitmydescendant(struct witness *parent, struct witness *child)
636{
637 struct witness *w;
638 int i;
639 int j;
640
641 for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
1192
1193 if (w1->w_childcnt != 0)
1194 return;
1195
1196 if (w1 == parent)
1197 return;
1198 for (w = parent; w->w_morechildren != w1; w = w->w_morechildren)
1199 continue;

--- 19 unchanged lines hidden (view full) ---

1219static int
1220isitmydescendant(struct witness *parent, struct witness *child)
1221{
1222 struct witness *w;
1223 int i;
1224 int j;
1225
1226 for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
642 ASS(j < 1000);
1227 MPASS(j < 1000);
643 for (i = 0; i < w->w_childcnt; i++) {
644 if (w->w_children[i] == child)
645 return (1);
646 }
647 for (i = 0; i < w->w_childcnt; i++) {
648 if (isitmydescendant(w->w_children[i], child))
649 return (1);
650 }

--- 139 unchanged lines hidden (view full) ---

790
791void
792witness_restore(struct mtx *m, const char *file, int line)
793{
794 m->mtx_witness->w_file = file;
795 m->mtx_witness->w_line = line;
796}
797
1228 for (i = 0; i < w->w_childcnt; i++) {
1229 if (w->w_children[i] == child)
1230 return (1);
1231 }
1232 for (i = 0; i < w->w_childcnt; i++) {
1233 if (isitmydescendant(w->w_children[i], child))
1234 return (1);
1235 }

--- 139 unchanged lines hidden (view full) ---

1375
1376void
1377witness_restore(struct mtx *m, const char *file, int line)
1378{
1379 m->mtx_witness->w_file = file;
1380 m->mtx_witness->w_line = line;
1381}
1382
798#endif /* (defined(SMP_DEBUG) && defined(WITNESS)) */
1383#endif /* (defined(MUTEX_DEBUG) && defined(WITNESS)) */