/* subr_witness.c, revision 71320 */
1/*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 * 28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 * $FreeBSD: head/sys/kern/subr_witness.c 71320 2001-01-21 07:52:20Z jasone $ 31 */ 32 33/* 34 * Main Entry: witness 35 * Pronunciation: 'wit-n&s 36 * Function: noun 37 * Etymology: Middle English witnesse, from Old English witnes knowledge, 38 * testimony, witness, from 2wit 39 * Date: before 12th century 40 * 1 : attestation of a fact or event : TESTIMONY 41 * 2 : one that gives evidence; specifically : one who testifies in 42 * a cause or before a judicial tribunal 43 * 3 : one asked to be present at a transaction so as to be able to 44 * testify to its having taken place 45 * 4 : one who has personal knowledge of something 46 * 5 a : something serving as evidence or proof : SIGN 47 * b : public affirmation by word or example of usually 48 * religious faith or conviction <the heroic witness to divine 49 * life -- Pilot> 50 * 6 capitalized : a member of the Jehovah's Witnesses 51 */ 52 53#include "opt_ddb.h" 54#include "opt_witness.h" 55 56/* 57 * Cause non-inlined mtx_*() to be compiled. 58 * Must be defined early because other system headers may include mutex.h. 
59 */ 60#define _KERN_MUTEX_C_ 61 62#include <sys/param.h> 63#include <sys/bus.h> 64#include <sys/kernel.h> 65#include <sys/malloc.h> 66#include <sys/proc.h> 67#include <sys/sysctl.h> 68#include <sys/systm.h> 69#include <sys/vmmeter.h> 70#include <sys/ktr.h> 71 72#include <machine/atomic.h> 73#include <machine/bus.h> 74#include <machine/clock.h> 75#include <machine/cpu.h> 76 77#include <ddb/ddb.h> 78 79#include <vm/vm.h> 80#include <vm/vm_extern.h> 81 82#include <sys/mutex.h> 83 84/* 85 * Machine independent bits of the mutex implementation 86 */ 87/* All mutexes in system (used for debug/panic) */ 88#ifdef WITNESS 89static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0, 90 "All mutexes queue head" }; 91static struct mtx all_mtx = { 0, MTX_UNOWNED, 0, 0, {&all_mtx_debug}, 92 TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked), 93 { NULL, NULL }, &all_mtx, &all_mtx }; 94/* 95 * Set to 0 once mutexes have been fully initialized so that witness code can be 96 * safely executed. 97 */ 98static int witness_cold = 1; 99#else /* WITNESS */ 100static struct mtx all_mtx = { 0, MTX_UNOWNED, 0, 0, {"All mutexes queue head"}, 101 TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked), 102 { NULL, NULL }, &all_mtx, &all_mtx }; 103#endif /* WITNESS */ 104 105static int mtx_cur_cnt; 106static int mtx_max_cnt; 107 108static void propagate_priority(struct proc *); 109 110#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) 111#define mtx_owner(m) (mtx_unowned(m) ? NULL \ 112 : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK)) 113 114#define RETIP(x) *(((uintptr_t *)(&x)) - 1) 115#define SET_PRIO(p, pri) (p)->p_priority = (pri) 116 117static void 118propagate_priority(struct proc *p) 119{ 120 int pri = p->p_priority; 121 struct mtx *m = p->p_blocked; 122 123 mtx_assert(&sched_lock, MA_OWNED); 124 for (;;) { 125 struct proc *p1; 126 127 p = mtx_owner(m); 128 129 if (p == NULL) { 130 /* 131 * This really isn't quite right. 
Really 132 * ought to bump priority of process that 133 * next acquires the mutex. 134 */ 135 MPASS(m->mtx_lock == MTX_CONTESTED); 136 return; 137 } 138 MPASS(p->p_magic == P_MAGIC); 139 KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex")); 140 if (p->p_priority <= pri) 141 return; 142 143 /* 144 * Bump this process' priority. 145 */ 146 SET_PRIO(p, pri); 147 148 /* 149 * If lock holder is actually running, just bump priority. 150 */ 151#ifdef SMP 152 /* 153 * For SMP, we can check the p_oncpu field to see if we are 154 * running. 155 */ 156 if (p->p_oncpu != 0xff) { 157 MPASS(p->p_stat == SRUN || p->p_stat == SZOMB); 158 return; 159 } 160#else 161 /* 162 * For UP, we check to see if p is curproc (this shouldn't 163 * ever happen however as it would mean we are in a deadlock.) 164 */ 165 if (p == curproc) { 166 panic("Deadlock detected"); 167 return; 168 } 169#endif 170 /* 171 * If on run queue move to new run queue, and 172 * quit. 173 */ 174 if (p->p_stat == SRUN) { 175 printf("XXX: moving process %d(%s) to a new run queue\n", 176 p->p_pid, p->p_comm); 177 MPASS(p->p_blocked == NULL); 178 remrunqueue(p); 179 setrunqueue(p); 180 return; 181 } 182 183 /* 184 * If we aren't blocked on a mutex, we should be. 185 */ 186 KASSERT(p->p_stat == SMTX, ( 187 "process %d(%s):%d holds %s but isn't blocked on a mutex\n", 188 p->p_pid, p->p_comm, p->p_stat, 189 m->mtx_description)); 190 191 /* 192 * Pick up the mutex that p is blocked on. 
193 */ 194 m = p->p_blocked; 195 MPASS(m != NULL); 196 197 printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid, 198 p->p_comm, m->mtx_description); 199 /* 200 * Check if the proc needs to be moved up on 201 * the blocked chain 202 */ 203 if (p == TAILQ_FIRST(&m->mtx_blocked)) { 204 printf("XXX: process at head of run queue\n"); 205 continue; 206 } 207 p1 = TAILQ_PREV(p, rq, p_procq); 208 if (p1->p_priority <= pri) { 209 printf( 210 "XXX: previous process %d(%s) has higher priority\n", 211 p->p_pid, p->p_comm); 212 continue; 213 } 214 215 /* 216 * Remove proc from blocked chain and determine where 217 * it should be moved up to. Since we know that p1 has 218 * a lower priority than p, we know that at least one 219 * process in the chain has a lower priority and that 220 * p1 will thus not be NULL after the loop. 221 */ 222 TAILQ_REMOVE(&m->mtx_blocked, p, p_procq); 223 TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) { 224 MPASS(p1->p_magic == P_MAGIC); 225 if (p1->p_priority > pri) 226 break; 227 } 228 MPASS(p1 != NULL); 229 TAILQ_INSERT_BEFORE(p1, p, p_procq); 230 CTR4(KTR_LOCK, 231 "propagate_priority: p 0x%p moved before 0x%p on [0x%p] %s", 232 p, p1, m, m->mtx_description); 233 } 234} 235 236void 237mtx_enter_hard(struct mtx *m, int type, int saveintr) 238{ 239 struct proc *p = CURPROC; 240 241 KASSERT(p != NULL, ("curproc is NULL in mutex")); 242 243 switch (type) { 244 case MTX_DEF: 245 if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) { 246 m->mtx_recurse++; 247 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); 248 if ((type & MTX_QUIET) == 0) 249 CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m); 250 return; 251 } 252 if ((type & MTX_QUIET) == 0) 253 CTR3(KTR_LOCK, 254 "mtx_enter: 0x%p contested (lock=%p) [0x%p]", 255 m, (void *)m->mtx_lock, (void *)RETIP(m)); 256 257 /* 258 * Save our priority. Even though p_nativepri is protected 259 * by sched_lock, we don't obtain it here as it can be 260 * expensive. 
Since this is the only place p_nativepri is 261 * set, and since two CPUs will not be executing the same 262 * process concurrently, we know that no other CPU is going 263 * to be messing with this. Also, p_nativepri is only read 264 * when we are blocked on a mutex, so that can't be happening 265 * right now either. 266 */ 267 p->p_nativepri = p->p_priority; 268 while (!_obtain_lock(m, p)) { 269 uintptr_t v; 270 struct proc *p1; 271 272 mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY); 273 /* 274 * check if the lock has been released while 275 * waiting for the schedlock. 276 */ 277 if ((v = m->mtx_lock) == MTX_UNOWNED) { 278 mtx_exit(&sched_lock, MTX_SPIN); 279 continue; 280 } 281 /* 282 * The mutex was marked contested on release. This 283 * means that there are processes blocked on it. 284 */ 285 if (v == MTX_CONTESTED) { 286 p1 = TAILQ_FIRST(&m->mtx_blocked); 287 KASSERT(p1 != NULL, ("contested mutex has no contesters")); 288 KASSERT(p != NULL, ("curproc is NULL for contested mutex")); 289 m->mtx_lock = (uintptr_t)p | MTX_CONTESTED; 290 if (p1->p_priority < p->p_priority) { 291 SET_PRIO(p, p1->p_priority); 292 } 293 mtx_exit(&sched_lock, MTX_SPIN); 294 return; 295 } 296 /* 297 * If the mutex isn't already contested and 298 * a failure occurs setting the contested bit the 299 * mutex was either release or the 300 * state of the RECURSION bit changed. 301 */ 302 if ((v & MTX_CONTESTED) == 0 && 303 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, 304 (void *)(v | MTX_CONTESTED))) { 305 mtx_exit(&sched_lock, MTX_SPIN); 306 continue; 307 } 308 309 /* We definitely have to sleep for this lock */ 310 mtx_assert(m, MA_NOTOWNED); 311 312#ifdef notyet 313 /* 314 * If we're borrowing an interrupted thread's VM 315 * context must clean up before going to sleep. 
316 */ 317 if (p->p_flag & (P_ITHD | P_SITHD)) { 318 ithd_t *it = (ithd_t *)p; 319 320 if (it->it_interrupted) { 321 if ((type & MTX_QUIET) == 0) 322 CTR2(KTR_LOCK, 323 "mtx_enter: 0x%x interrupted 0x%x", 324 it, it->it_interrupted); 325 intr_thd_fixup(it); 326 } 327 } 328#endif 329 330 /* Put us on the list of procs blocked on this mutex */ 331 if (TAILQ_EMPTY(&m->mtx_blocked)) { 332 p1 = (struct proc *)(m->mtx_lock & 333 MTX_FLAGMASK); 334 LIST_INSERT_HEAD(&p1->p_contested, m, 335 mtx_contested); 336 TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq); 337 } else { 338 TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) 339 if (p1->p_priority > p->p_priority) 340 break; 341 if (p1) 342 TAILQ_INSERT_BEFORE(p1, p, p_procq); 343 else 344 TAILQ_INSERT_TAIL(&m->mtx_blocked, p, 345 p_procq); 346 } 347 348 p->p_blocked = m; /* Who we're blocked on */ 349 p->p_mtxname = m->mtx_description; 350 p->p_stat = SMTX; 351#if 0 352 propagate_priority(p); 353#endif 354 if ((type & MTX_QUIET) == 0) 355 CTR3(KTR_LOCK, 356 "mtx_enter: p 0x%p blocked on [0x%p] %s", 357 p, m, m->mtx_description); 358 mi_switch(); 359 if ((type & MTX_QUIET) == 0) 360 CTR3(KTR_LOCK, 361 "mtx_enter: p 0x%p free from blocked on [0x%p] %s", 362 p, m, m->mtx_description); 363 mtx_exit(&sched_lock, MTX_SPIN); 364 } 365 return; 366 case MTX_SPIN: 367 case MTX_SPIN | MTX_FIRST: 368 case MTX_SPIN | MTX_TOPHALF: 369 { 370 int i = 0; 371 372 if (m->mtx_lock == (uintptr_t)p) { 373 m->mtx_recurse++; 374 return; 375 } 376 if ((type & MTX_QUIET) == 0) 377 CTR1(KTR_LOCK, "mtx_enter: %p spinning", m); 378 for (;;) { 379 if (_obtain_lock(m, p)) 380 break; 381 while (m->mtx_lock != MTX_UNOWNED) { 382 if (i++ < 1000000) 383 continue; 384 if (i++ < 6000000) 385 DELAY (1); 386#ifdef DDB 387 else if (!db_active) 388#else 389 else 390#endif 391 panic( 392 "spin lock %s held by 0x%p for > 5 seconds", 393 m->mtx_description, 394 (void *)m->mtx_lock); 395 } 396 } 397 398#ifdef MUTEX_DEBUG 399 if (type != MTX_SPIN) 400 m->mtx_saveintr = 
0xbeefface; 401 else 402#endif 403 m->mtx_saveintr = saveintr; 404 if ((type & MTX_QUIET) == 0) 405 CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m); 406 return; 407 } 408 } 409} 410 411void 412mtx_exit_hard(struct mtx *m, int type) 413{ 414 struct proc *p, *p1; 415 struct mtx *m1; 416 int pri; 417 418 p = CURPROC; 419 switch (type) { 420 case MTX_DEF: 421 case MTX_DEF | MTX_NOSWITCH: 422 if (mtx_recursed(m)) { 423 if (--(m->mtx_recurse) == 0) 424 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); 425 if ((type & MTX_QUIET) == 0) 426 CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m); 427 return; 428 } 429 mtx_enter(&sched_lock, MTX_SPIN); 430 if ((type & MTX_QUIET) == 0) 431 CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m); 432 p1 = TAILQ_FIRST(&m->mtx_blocked); 433 MPASS(p->p_magic == P_MAGIC); 434 MPASS(p1->p_magic == P_MAGIC); 435 TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq); 436 if (TAILQ_EMPTY(&m->mtx_blocked)) { 437 LIST_REMOVE(m, mtx_contested); 438 _release_lock_quick(m); 439 if ((type & MTX_QUIET) == 0) 440 CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m); 441 } else 442 atomic_store_rel_ptr(&m->mtx_lock, 443 (void *)MTX_CONTESTED); 444 pri = MAXPRI; 445 LIST_FOREACH(m1, &p->p_contested, mtx_contested) { 446 int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority; 447 if (cp < pri) 448 pri = cp; 449 } 450 if (pri > p->p_nativepri) 451 pri = p->p_nativepri; 452 SET_PRIO(p, pri); 453 if ((type & MTX_QUIET) == 0) 454 CTR2(KTR_LOCK, 455 "mtx_exit: 0x%p contested setrunqueue 0x%p", m, p1); 456 p1->p_blocked = NULL; 457 p1->p_mtxname = NULL; 458 p1->p_stat = SRUN; 459 setrunqueue(p1); 460 if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) { 461#ifdef notyet 462 if (p->p_flag & (P_ITHD | P_SITHD)) { 463 ithd_t *it = (ithd_t *)p; 464 465 if (it->it_interrupted) { 466 if ((type & MTX_QUIET) == 0) 467 CTR2(KTR_LOCK, 468 "mtx_exit: 0x%x interruped 0x%x", 469 it, it->it_interrupted); 470 intr_thd_fixup(it); 471 } 472 } 473#endif 474 setrunqueue(p); 475 if ((type & MTX_QUIET) == 0) 476 
CTR2(KTR_LOCK, 477 "mtx_exit: 0x%p switching out lock=0x%p", 478 m, (void *)m->mtx_lock); 479 mi_switch(); 480 if ((type & MTX_QUIET) == 0) 481 CTR2(KTR_LOCK, 482 "mtx_exit: 0x%p resuming lock=0x%p", 483 m, (void *)m->mtx_lock); 484 } 485 mtx_exit(&sched_lock, MTX_SPIN); 486 break; 487 case MTX_SPIN: 488 case MTX_SPIN | MTX_FIRST: 489 if (mtx_recursed(m)) { 490 m->mtx_recurse--; 491 return; 492 } 493 MPASS(mtx_owned(m)); 494 _release_lock_quick(m); 495 if (type & MTX_FIRST) 496 enable_intr(); /* XXX is this kosher? */ 497 else { 498 MPASS(m->mtx_saveintr != 0xbeefface); 499 restore_intr(m->mtx_saveintr); 500 } 501 break; 502 case MTX_SPIN | MTX_TOPHALF: 503 if (mtx_recursed(m)) { 504 m->mtx_recurse--; 505 return; 506 } 507 MPASS(mtx_owned(m)); 508 _release_lock_quick(m); 509 break; 510 default: 511 panic("mtx_exit_hard: unsupported type 0x%x\n", type); 512 } 513} 514 515#define MV_DESTROY 0 /* validate before destory */ 516#define MV_INIT 1 /* validate before init */ 517 518#ifdef MUTEX_DEBUG 519 520int mtx_validate __P((struct mtx *, int)); 521 522int 523mtx_validate(struct mtx *m, int when) 524{ 525 struct mtx *mp; 526 int i; 527 int retval = 0; 528 529#ifdef WITNESS 530 if (witness_cold) 531 return 0; 532#endif 533 if (m == &all_mtx || cold) 534 return 0; 535 536 mtx_enter(&all_mtx, MTX_DEF); 537/* 538 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly 539 * we can re-enable the kernacc() checks. 
540 */ 541#ifndef __alpha__ 542 MPASS(kernacc((caddr_t)all_mtx.mtx_next, sizeof(uintptr_t), 543 VM_PROT_READ) == 1); 544#endif 545 MPASS(all_mtx.mtx_next->mtx_prev == &all_mtx); 546 for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) { 547#ifndef __alpha__ 548 if (kernacc((caddr_t)mp->mtx_next, sizeof(uintptr_t), 549 VM_PROT_READ) != 1) { 550 panic("mtx_validate: mp=%p mp->mtx_next=%p", 551 mp, mp->mtx_next); 552 } 553#endif 554 i++; 555 if (i > mtx_cur_cnt) { 556 panic("mtx_validate: too many in chain, known=%d\n", 557 mtx_cur_cnt); 558 } 559 } 560 MPASS(i == mtx_cur_cnt); 561 switch (when) { 562 case MV_DESTROY: 563 for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) 564 if (mp == m) 565 break; 566 MPASS(mp == m); 567 break; 568 case MV_INIT: 569 for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) 570 if (mp == m) { 571 /* 572 * Not good. This mutex already exists. 573 */ 574 printf("re-initing existing mutex %s\n", 575 m->mtx_description); 576 MPASS(m->mtx_lock == MTX_UNOWNED); 577 retval = 1; 578 } 579 } 580 mtx_exit(&all_mtx, MTX_DEF); 581 return (retval); 582} 583#endif 584 585void 586mtx_init(struct mtx *m, const char *t, int flag) 587{ 588 if ((flag & MTX_QUIET) == 0) 589 CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t); 590#ifdef MUTEX_DEBUG 591 if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */ 592 return; 593#endif 594 595 bzero((void *)m, sizeof *m); 596 TAILQ_INIT(&m->mtx_blocked); 597#ifdef WITNESS 598 if (!witness_cold) { 599 /* XXX - should not use DEVBUF */ 600 m->mtx_union.mtxu_debug = malloc(sizeof(struct mtx_debug), 601 M_DEVBUF, M_NOWAIT | M_ZERO); 602 MPASS(m->mtx_union.mtxu_debug != NULL); 603 604 m->mtx_description = t; 605 } else { 606 /* 607 * Save a pointer to the description so that witness_fixup() 608 * can properly initialize this mutex later on. 
	 */
		m->mtx_union.mtxu_description = t;
	}
#else
	m->mtx_description = t;
#endif

	m->mtx_flags = flag;
	m->mtx_lock = MTX_UNOWNED;
	/* Put on all mutex queue */
	mtx_enter(&all_mtx, MTX_DEF);
	m->mtx_next = &all_mtx;
	m->mtx_prev = all_mtx.mtx_prev;
	m->mtx_prev->mtx_next = m;
	all_mtx.mtx_prev = m;
	if (++mtx_cur_cnt > mtx_max_cnt)
		mtx_max_cnt = mtx_cur_cnt;
	mtx_exit(&all_mtx, MTX_DEF);
#ifdef WITNESS
	/* Witness bookkeeping is deferred until witness_fixup() has run. */
	if (!witness_cold)
		witness_init(m, flag);
#endif
}

/*
 * Tear down a mutex: run sanity diagnostics (MUTEX_DEBUG), release the
 * witness state (WITNESS), and unlink the mutex from the all-mutexes
 * list.  The mutex must be unowned, or owned without recursion/waiters.
 */
void
mtx_destroy(struct mtx *m)
{

#ifdef WITNESS
	KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n",
	    __FUNCTION__));
#endif
	CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
#ifdef MUTEX_DEBUG
	/* A NULL mtx_next means we already unlinked it once before. */
	if (m->mtx_next == NULL)
		panic("mtx_destroy: %p (%s) already destroyed",
		    m, m->mtx_description);

	if (!mtx_owned(m)) {
		MPASS(m->mtx_lock == MTX_UNOWNED);
	} else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
	}
	mtx_validate(m, MV_DESTROY);		/* diagnostic */
#endif

#ifdef WITNESS
	if (m->mtx_witness)
		witness_destroy(m);
#endif /* WITNESS */

	/* Remove from the all mutex queue */
	mtx_enter(&all_mtx, MTX_DEF);
	m->mtx_next->mtx_prev = m->mtx_prev;
	m->mtx_prev->mtx_next = m->mtx_next;
#ifdef MUTEX_DEBUG
	m->mtx_next = m->mtx_prev = NULL;
#endif
#ifdef WITNESS
	free(m->mtx_union.mtxu_debug, M_DEVBUF);
	m->mtx_union.mtxu_debug = NULL;
#endif
	mtx_cur_cnt--;
	mtx_exit(&all_mtx, MTX_DEF);
}

/*
 * SYSINIT hook: finish initializing every mutex that was created while
 * the system was too cold to call malloc(), then mark the witness code
 * as ready for use.
 */
static void
witness_fixup(void *dummy __unused)
{
#ifdef WITNESS
	struct mtx *mp;
	const char *description;

	/* Iterate through all mutexes and finish up mutex initialization. */
	for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
		description = mp->mtx_union.mtxu_description;

		/* XXX - should not use DEVBUF */
		mp->mtx_union.mtxu_debug = malloc(sizeof(struct mtx_debug),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		MPASS(mp->mtx_union.mtxu_debug != NULL);

		mp->mtx_description = description;

		witness_init(mp, mp->mtx_flags);
	}

	/* Mark the witness code as being ready for use. */
	atomic_store_rel_int(&witness_cold, 0);
#endif
}
SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL)

/*
 * The non-inlined versions of the mtx_*() functions are always built (above),
 * but the witness code depends on the WITNESS kernel option being specified.
 */
#ifdef WITNESS

#define WITNESS_COUNT 200
#define	WITNESS_NCHILDREN 2

/* Set to 0 to disable witness checking at run time. */
int witness_watch = 1;

/*
 * One node in the lock-order graph.  Witnesses are keyed by mutex
 * description string, so all mutexes with the same description share one.
 */
struct witness {
	struct witness	*w_next;
	const char	*w_description;
	const char	*w_file;	/* File of last acquisition. */
	int		 w_line;	/* Line of last acquisition. */
	struct witness	*w_morechildren; /* Overflow node for children. */
	u_char		 w_childcnt;
	u_char		 w_Giant_squawked:1;
	u_char		 w_other_squawked:1;
	u_char		 w_same_squawked:1;
	u_char		 w_sleep:1;	/* MTX_DEF type mutex. */
	u_char		 w_spin:1;	/* MTX_SPIN type mutex. */
	u_char		 w_recurse:1;	/* MTX_RECURSE mutex option. */
	u_int		 w_level;
	struct witness	*w_children[WITNESS_NCHILDREN];
};

/* A pair of lock descriptions exempted from order-reversal complaints. */
struct witness_blessed {
	char	*b_lock1;
	char	*b_lock2;
};

#ifdef DDB
/*
 * When DDB is enabled and witness_ddb is set to 1, it will cause the system to
 * drop into kdebug() when:
 *	- a lock hierarchy violation occurs
 *	- locks are held when going to sleep.
741 */ 742#ifdef WITNESS_DDB 743int witness_ddb = 1; 744#else 745int witness_ddb = 0; 746#endif 747SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, ""); 748#endif /* DDB */ 749 750#ifdef WITNESS_SKIPSPIN 751int witness_skipspin = 1; 752#else 753int witness_skipspin = 0; 754#endif 755SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0, 756 ""); 757 758static struct mtx w_mtx; 759static struct witness *w_free; 760static struct witness *w_all; 761static int w_inited; 762static int witness_dead; /* fatal error, probably no memory */ 763 764static struct witness w_data[WITNESS_COUNT]; 765 766static struct witness *enroll __P((const char *description, int flag)); 767static int itismychild __P((struct witness *parent, struct witness *child)); 768static void removechild __P((struct witness *parent, struct witness *child)); 769static int isitmychild __P((struct witness *parent, struct witness *child)); 770static int isitmydescendant __P((struct witness *parent, struct witness *child)); 771static int dup_ok __P((struct witness *)); 772static int blessed __P((struct witness *, struct witness *)); 773static void witness_displaydescendants 774 __P((void(*)(const char *fmt, ...), struct witness *)); 775static void witness_leveldescendents __P((struct witness *parent, int level)); 776static void witness_levelall __P((void)); 777static struct witness * witness_get __P((void)); 778static void witness_free __P((struct witness *m)); 779 780 781static char *ignore_list[] = { 782 "witness lock", 783 NULL 784}; 785 786static char *spin_order_list[] = { 787 "sio", 788 "sched lock", 789#ifdef __i386__ 790 "clk", 791#endif 792 "callout", 793 /* 794 * leaf locks 795 */ 796 NULL 797}; 798 799static char *order_list[] = { 800 "uidinfo hash", "uidinfo struct", NULL, 801 NULL 802}; 803 804static char *dup_list[] = { 805 NULL 806}; 807 808static char *sleep_list[] = { 809 "Giant", 810 NULL 811}; 812 813/* 814 * Pairs of locks which have been 
blessed 815 * Don't complain about order problems with blessed locks 816 */ 817static struct witness_blessed blessed_list[] = { 818}; 819static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed); 820 821void 822witness_init(struct mtx *m, int flag) 823{ 824 m->mtx_witness = enroll(m->mtx_description, flag); 825} 826 827void 828witness_destroy(struct mtx *m) 829{ 830 struct mtx *m1; 831 struct proc *p; 832 p = CURPROC; 833 for ((m1 = LIST_FIRST(&p->p_heldmtx)); m1 != NULL; 834 m1 = LIST_NEXT(m1, mtx_held)) { 835 if (m1 == m) { 836 LIST_REMOVE(m, mtx_held); 837 break; 838 } 839 } 840 return; 841 842} 843 844void 845witness_enter(struct mtx *m, int flags, const char *file, int line) 846{ 847 struct witness *w, *w1; 848 struct mtx *m1; 849 struct proc *p; 850 int i; 851#ifdef DDB 852 int go_into_ddb = 0; 853#endif /* DDB */ 854 855 if (witness_cold) 856 return; 857 if (panicstr) 858 return; 859 w = m->mtx_witness; 860 p = CURPROC; 861 862 if (flags & MTX_SPIN) { 863 if (!(w->w_spin)) 864 panic("mutex_enter: MTX_SPIN on MTX_DEF mutex %s @" 865 " %s:%d", m->mtx_description, file, line); 866 if (mtx_recursed(m)) { 867 if (!(w->w_recurse)) 868 panic("mutex_enter: recursion on non-recursive" 869 " mutex %s @ %s:%d", m->mtx_description, 870 file, line); 871 return; 872 } 873 mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); 874 i = PCPU_GET(witness_spin_check); 875 if (i != 0 && w->w_level < i) { 876 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 877 panic("mutex_enter(%s:%x, MTX_SPIN) out of order @" 878 " %s:%d already holding %s:%x", 879 m->mtx_description, w->w_level, file, line, 880 spin_order_list[ffs(i)-1], i); 881 } 882 PCPU_SET(witness_spin_check, i | w->w_level); 883 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 884 w->w_file = file; 885 w->w_line = line; 886 m->mtx_line = line; 887 m->mtx_file = file; 888 return; 889 } 890 if (w->w_spin) 891 panic("mutex_enter: MTX_DEF on MTX_SPIN mutex %s @ %s:%d", 892 m->mtx_description, file, line); 893 894 if 
(mtx_recursed(m)) { 895 if (!(w->w_recurse)) 896 panic("mutex_enter: recursion on non-recursive" 897 " mutex %s @ %s:%d", m->mtx_description, 898 file, line); 899 return; 900 } 901 if (witness_dead) 902 goto out; 903 if (cold) 904 goto out; 905 906 if (!mtx_legal2block()) 907 panic("blockable mtx_enter() of %s when not legal @ %s:%d", 908 m->mtx_description, file, line); 909 /* 910 * Is this the first mutex acquired 911 */ 912 if ((m1 = LIST_FIRST(&p->p_heldmtx)) == NULL) 913 goto out; 914 915 if ((w1 = m1->mtx_witness) == w) { 916 if (w->w_same_squawked || dup_ok(w)) 917 goto out; 918 w->w_same_squawked = 1; 919 printf("acquring duplicate lock of same type: \"%s\"\n", 920 m->mtx_description); 921 printf(" 1st @ %s:%d\n", w->w_file, w->w_line); 922 printf(" 2nd @ %s:%d\n", file, line); 923#ifdef DDB 924 go_into_ddb = 1; 925#endif /* DDB */ 926 goto out; 927 } 928 MPASS(!mtx_owned(&w_mtx)); 929 mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); 930 /* 931 * If we have a known higher number just say ok 932 */ 933 if (witness_watch > 1 && w->w_level > w1->w_level) { 934 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 935 goto out; 936 } 937 if (isitmydescendant(m1->mtx_witness, w)) { 938 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 939 goto out; 940 } 941 for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) { 942 943 MPASS(i < 200); 944 w1 = m1->mtx_witness; 945 if (isitmydescendant(w, w1)) { 946 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 947 if (blessed(w, w1)) 948 goto out; 949 if (m1 == &Giant) { 950 if (w1->w_Giant_squawked) 951 goto out; 952 else 953 w1->w_Giant_squawked = 1; 954 } else { 955 if (w1->w_other_squawked) 956 goto out; 957 else 958 w1->w_other_squawked = 1; 959 } 960 printf("lock order reversal\n"); 961 printf(" 1st %s last acquired @ %s:%d\n", 962 w->w_description, w->w_file, w->w_line); 963 printf(" 2nd %p %s @ %s:%d\n", 964 m1, w1->w_description, w1->w_file, w1->w_line); 965 printf(" 3rd %p %s @ %s:%d\n", 966 m, w->w_description, file, line); 967#ifdef DDB 968 
go_into_ddb = 1; 969#endif /* DDB */ 970 goto out; 971 } 972 } 973 m1 = LIST_FIRST(&p->p_heldmtx); 974 if (!itismychild(m1->mtx_witness, w)) 975 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 976 977out: 978#ifdef DDB 979 if (witness_ddb && go_into_ddb) 980 Debugger("witness_enter"); 981#endif /* DDB */ 982 w->w_file = file; 983 w->w_line = line; 984 m->mtx_line = line; 985 m->mtx_file = file; 986 987 /* 988 * If this pays off it likely means that a mutex being witnessed 989 * is acquired in hardclock. Put it in the ignore list. It is 990 * likely not the mutex this assert fails on. 991 */ 992 MPASS(m->mtx_held.le_prev == NULL); 993 LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held); 994} 995 996void 997witness_exit(struct mtx *m, int flags, const char *file, int line) 998{ 999 struct witness *w; 1000 1001 if (witness_cold) 1002 return; 1003 if (panicstr) 1004 return; 1005 w = m->mtx_witness; 1006 1007 if (flags & MTX_SPIN) { 1008 if (!(w->w_spin)) 1009 panic("mutex_exit: MTX_SPIN on MTX_DEF mutex %s @" 1010 " %s:%d", m->mtx_description, file, line); 1011 if (mtx_recursed(m)) { 1012 if (!(w->w_recurse)) 1013 panic("mutex_exit: recursion on non-recursive" 1014 " mutex %s @ %s:%d", m->mtx_description, 1015 file, line); 1016 return; 1017 } 1018 mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); 1019 PCPU_SET(witness_spin_check, 1020 PCPU_GET(witness_spin_check) & ~w->w_level); 1021 mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); 1022 return; 1023 } 1024 if (w->w_spin) 1025 panic("mutex_exit: MTX_DEF on MTX_SPIN mutex %s @ %s:%d", 1026 m->mtx_description, file, line); 1027 1028 if (mtx_recursed(m)) { 1029 if (!(w->w_recurse)) 1030 panic("mutex_exit: recursion on non-recursive" 1031 " mutex %s @ %s:%d", m->mtx_description, 1032 file, line); 1033 return; 1034 } 1035 1036 if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold) 1037 panic("switchable mtx_exit() of %s when not legal @ %s:%d", 1038 m->mtx_description, file, line); 1039 LIST_REMOVE(m, mtx_held); 1040 
	m->mtx_held.le_prev = NULL;	/* tail of preceding function: clear back-link so the MPASS() in witness_try_enter() can detect a double insert */
}

/*
 * Witness bookkeeping for mtx_try_enter().  Unlike the blocking-acquire
 * path, no lock-order verification is performed: a try-lock cannot block,
 * so it cannot participate in a deadlock cycle.  Records the acquiring
 * file/line and, for sleep mutexes, links the mutex onto the current
 * process's held-mutex list.
 */
void
witness_try_enter(struct mtx *m, int flags, const char *file, int line)
{
	struct proc *p;
	struct witness *w = m->mtx_witness;

	/* Too early in boot, or already panicking: stay out of the way. */
	if (witness_cold)
		return;
	if (panicstr)
		return;
	if (flags & MTX_SPIN) {
		/* The caller's MTX_SPIN flag must match the mutex's class. */
		if (!(w->w_spin))
			panic("mutex_try_enter: "
			    "MTX_SPIN on MTX_DEF mutex %s @ %s:%d",
			    m->mtx_description, file, line);
		if (mtx_recursed(m)) {
			if (!(w->w_recurse))
				panic("mutex_try_enter: recursion on"
				    " non-recursive mutex %s @ %s:%d",
				    m->mtx_description, file, line);
			/* Recursive acquire: already accounted for. */
			return;
		}
		/*
		 * Fold this spin lock's one-hot level into the per-CPU
		 * spin-check mask, under w_mtx.
		 */
		mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
		PCPU_SET(witness_spin_check,
		    PCPU_GET(witness_spin_check) | w->w_level);
		mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
		w->w_file = file;
		w->w_line = line;
		m->mtx_line = line;
		m->mtx_file = file;
		return;
	}

	if (w->w_spin)
		panic("mutex_try_enter: MTX_DEF on MTX_SPIN mutex %s @ %s:%d",
		    m->mtx_description, file, line);

	if (mtx_recursed(m)) {
		if (!(w->w_recurse))
			panic("mutex_try_enter: recursion on non-recursive"
			    " mutex %s @ %s:%d", m->mtx_description, file,
			    line);
		return;
	}
	w->w_file = file;
	w->w_line = line;
	m->mtx_line = line;
	m->mtx_file = file;
	p = CURPROC;
	/* Must not already be on a held list. */
	MPASS(m->mtx_held.le_prev == NULL);
	LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}

/*
 * Dump the lock-order hierarchy through the caller-supplied printf-style
 * function: every acquired root witness (one that is no other witness's
 * child) is printed with its descendant tree, followed by a list of the
 * mutexes that were never acquired.
 */
void
witness_display(void(*prnt)(const char *fmt, ...))
{
	struct witness *w, *w1;

	KASSERT(!witness_cold, ("%s: witness_cold\n", __FUNCTION__));
	witness_levelall();

	for (w = w_all; w; w = w->w_next) {
		/* w_file == NULL means the lock was never acquired. */
		if (w->w_file == NULL)
			continue;
		/* Skip any witness that is some other witness's child. */
		for (w1 = w_all; w1; w1 = w1->w_next) {
			if (isitmychild(w1, w))
				break;
		}
		if (w1 != NULL)
			continue;
		/*
		 * This lock has no ancestors, display its descendants.
		 */
		witness_displaydescendants(prnt, w);
	}
	prnt("\nMutex which were never acquired\n");
	for (w = w_all; w; w = w->w_next) {
		if (w->w_file != NULL)
			continue;
		prnt("%s\n", w->w_description);
	}
}

/*
 * Complain if the current process holds any mutexes while (about to be)
 * sleeping.  Mutexes named on sleep_list are exempt, as is "mtx" itself
 * (presumably the interlock being slept on -- confirm against callers).
 * Returns the number of offending mutexes; check_only selects the
 * wording of the diagnostic.
 */
int
witness_sleep(int check_only, struct mtx *mtx, const char *file, int line)
{
	struct mtx *m;
	struct proc *p;
	char **sleep;
	int n = 0;

	KASSERT(!witness_cold, ("%s: witness_cold\n", __FUNCTION__));
	p = CURPROC;
	for ((m = LIST_FIRST(&p->p_heldmtx)); m != NULL;
	    m = LIST_NEXT(m, mtx_held)) {
		if (m == mtx)
			continue;
		/* Locks on sleep_list may legitimately be held across sleep. */
		for (sleep = sleep_list; *sleep!= NULL; sleep++)
			if (strcmp(m->mtx_description, *sleep) == 0)
				goto next;
		printf("%s:%d: %s with \"%s\" locked from %s:%d\n",
		    file, line, check_only ? "could sleep" : "sleeping",
		    m->mtx_description,
		    m->mtx_witness->w_file, m->mtx_witness->w_line);
		n++;
	next:
		/*
		 * NOTE(review): a label immediately before '}' is a GCC
		 * extension; standard C requires a statement ("next: ;").
		 */
	}
#ifdef DDB
	if (witness_ddb && n)
		Debugger("witness_sleep");
#endif /* DDB */
	return (n);
}

/*
 * Look up (or create) the witness for a lock named "description".
 * Returns NULL for locks on ignore_list, for spin locks when
 * witness_skipspin is set, when witness_watch is off, or when the
 * witness pool is exhausted.  The first call also initializes the
 * witness machinery and preloads the static lock-order lists.
 */
static struct witness *
enroll(const char *description, int flag)
{
	int i;
	struct witness *w, *w1;
	char **ignore;
	char **order;

	if (!witness_watch)
		return (NULL);
	for (ignore = ignore_list; *ignore != NULL; ignore++)
		if (strcmp(description, *ignore) == 0)
			return (NULL);

	if (w_inited == 0) {
		mtx_init(&w_mtx, "witness lock", MTX_SPIN);
		for (i = 0; i < WITNESS_COUNT; i++) {
			w = &w_data[i];
			witness_free(w);
		}
		/* Set before the recursive enroll() calls below. */
		w_inited = 1;
		/*
		 * order_list holds NULL-separated runs of lock names;
		 * each consecutive pair in a run establishes a
		 * parent -> child ordering edge.
		 */
		for (order = order_list; *order != NULL; order++) {
			w = enroll(*order, MTX_DEF);
			w->w_file = "order list";
			for (order++; *order != NULL; order++) {
				w1 = enroll(*order, MTX_DEF);
				w1->w_file = "order list";
				itismychild(w, w1);
				w = w1;
			}
		}
	}
	if ((flag & MTX_SPIN) && witness_skipspin)
		return (NULL);
	mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET);
	for (w = w_all; w; w = w->w_next) {
		if (strcmp(description, w->w_description) == 0) {
			mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
			return (w);
		}
	}
	/* witness_get() releases w_mtx itself when the pool is empty. */
	if ((w = witness_get()) == NULL)
		return (NULL);
	w->w_next = w_all;
	w_all = w;
	w->w_description = description;
	mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
	if (flag & MTX_SPIN) {
		w->w_spin = 1;

		/*
		 * Spin lock levels are one-hot bits assigned by position
		 * in spin_order_list.
		 */
		i = 1;
		for (order = spin_order_list; *order != NULL; order++) {
			if (strcmp(description, *order) == 0)
				break;
			i <<= 1;
		}
		if (*order == NULL)
			panic("spin lock %s not in order list", description);
		w->w_level = i;
	} else
		w->w_sleep = 1;

	if (flag & MTX_RECURSE)
		w->w_recurse = 1;

	return (w);
}

/*
 * Record "child" as a direct child of "parent", then re-canonicalize the
 * whole graph: every direct edge that is also implied transitively is
 * removed and re-added only where still needed.  The static "recursed"
 * flag keeps the pruning pass from re-entering itself when it calls
 * itismychild() again.  Returns 1 only when a new witness node could
 * not be allocated.
 */
static int
itismychild(struct witness *parent, struct witness *child)
{
	static int recursed;

	/*
	 * Insert "child" after "parent"
	 */
	while (parent->w_morechildren)
		parent = parent->w_morechildren;

	if (parent->w_childcnt == WITNESS_NCHILDREN) {
		/* Chain an overflow node to hold further children. */
		if ((parent->w_morechildren = witness_get()) == NULL)
			return (1);
		parent = parent->w_morechildren;
	}
	MPASS(child != NULL);
	parent->w_children[parent->w_childcnt++] = child;
	/*
	 * now prune whole tree
	 */
	if (recursed)
		return (0);
	recursed = 1;
	for (child = w_all; child != NULL; child = child->w_next) {
		for (parent = w_all; parent != NULL;
		    parent = parent->w_next) {
			if (!isitmychild(parent, child))
				continue;
			removechild(parent, child);
			if (isitmydescendant(parent, child))
				continue;
			itismychild(parent, child);
		}
	}
	recursed = 0;
	witness_levelall();
	return (0);
}

/*
 * Remove "child" from "parent"'s direct-child list.  The vacated slot
 * is filled with the last child of the last overflow node; an overflow
 * node left empty is unlinked and returned to the free pool.
 */
static void
removechild(struct witness *parent, struct witness *child)
{
	struct witness *w, *w1;
	int i;

	for (w = parent; w != NULL; w = w->w_morechildren)
		for (i = 0; i < w->w_childcnt; i++)
			if (w->w_children[i] == child)
				goto found;
	return;
found:
	/* w1 = last node in the overflow chain. */
	for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
		continue;
	w->w_children[i] = w1->w_children[--w1->w_childcnt];
	MPASS(w->w_children[i] != NULL);

	if (w1->w_childcnt != 0)
		return;

	if (w1 == parent)
		return;
	/* Unlink the now-empty overflow node and recycle it. */
	for (w = parent; w->w_morechildren != w1; w = w->w_morechildren)
		continue;
	w->w_morechildren = 0;
	witness_free(w1);
}

/*
 * Is "child" a direct child of "parent" (searching overflow nodes too)?
 */
static int
isitmychild(struct witness *parent, struct witness *child)
{
	struct witness *w;
	int i;

	for (w = parent; w != NULL; w = w->w_morechildren) {
		for (i = 0; i < w->w_childcnt; i++) {
			if (w->w_children[i] == child)
				return (1);
		}
	}
	return (0);
}

/*
 * Is "child" anywhere in "parent"'s subtree?  Depth-first recursion;
 * the MPASS bounds the overflow-chain walk as a cheap runaway check.
 */
static int
isitmydescendant(struct witness *parent, struct witness *child)
{
	struct witness *w;
	int i;
	int j;

	for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
		MPASS(j < 1000);
		for (i = 0; i < w->w_childcnt; i++) {
			if (w->w_children[i] == child)
				return (1);
		}
		for (i = 0; i < w->w_childcnt; i++) {
			if (isitmydescendant(w->w_children[i], child))
				return (1);
		}
	}
	return (0);
}

/*
 * Recompute w_level for every sleep-mutex witness: zero them all, then
 * number each root's subtree by depth via witness_leveldescendents().
 * Spin locks keep the one-hot levels assigned in enroll().
 */
void
witness_levelall (void)
{
	struct witness *w, *w1;

	for (w = w_all; w; w = w->w_next)
		if (!(w->w_spin))
			w->w_level = 0;
	for (w = w_all; w; w = w->w_next) {
		if (w->w_spin)
			continue;
		/* Only roots (witnesses with no parent) seed the numbering. */
		for (w1 = w_all; w1; w1 = w1->w_next) {
			if (isitmychild(w1, w))
				break;
		}
		if (w1 != NULL)
			continue;
		witness_leveldescendents(w, 0);
	}
}

/*
 * Assign "level" to "parent" (never lowering a deeper level already
 * recorded) and recurse with level + 1 over all of its children.
 */
static void
witness_leveldescendents(struct witness *parent, int level)
{
	int i;
	struct witness *w;

	if (parent->w_level < level)
		parent->w_level = level;
	level++;
	for (w = parent; w != NULL; w = w->w_morechildren)
		for (i = 0; i < w->w_childcnt; i++)
			witness_leveldescendents(w->w_children[i], level);
}

/*
 * Print "parent" prefixed by its level and indented accordingly, with
 * the file (and, unless W_USE_WHERE, the line) of its last acquisition,
 * then recurse over its children.
 */
static void
witness_displaydescendants(void(*prnt)(const char *fmt, ...),
    struct witness *parent)
{
	struct witness *w;
	int i;
	int level = parent->w_level;

	prnt("%d", level);
	/* Pad single-digit levels so the names line up. */
	if (level < 10)
		prnt(" ");
	for (i = 0; i < level; i++)
		prnt(" ");
	prnt("%s", parent->w_description);
	if (parent->w_file != NULL) {
		prnt(" -- last acquired @ %s", parent->w_file);
#ifndef W_USE_WHERE
		prnt(":%d", parent->w_line);
#endif
		prnt("\n");
	}

	for (w = parent; w != NULL; w = w->w_morechildren)
		for (i = 0; i < w->w_childcnt; i++)
			witness_displaydescendants(prnt, w->w_children[i]);
}

/*
 * May this lock legitimately be held more than once at a time?
 * (True for locks named on dup_list.)
 */
static int
dup_ok(struct witness *w)
{
	char **dup;

	for (dup = dup_list; *dup!= NULL; dup++)
		if (strcmp(w->w_description, *dup) == 0)
			return (1);
	return (0);
}

/*
 * Is the (w1, w2) pair on the blessed list, i.e. exempt from order
 * checking?  The list is symmetric: either orientation matches.
 */
static int
blessed(struct witness *w1, struct witness *w2)
{
	int i;
	struct witness_blessed *b;

	for (i = 0; i < blessed_count; i++) {
		b = &blessed_list[i];
		if (strcmp(w1->w_description, b->b_lock1) == 0) {
			if (strcmp(w2->w_description, b->b_lock2) == 0)
				return (1);
			continue;
		}
		if (strcmp(w1->w_description, b->b_lock2) == 0)
			if (strcmp(w2->w_description, b->b_lock1) == 0)
				return (1);
	}
	return (0);
}

/*
 * Pop a witness off the free list.  Called with w_mtx held; on
 * exhaustion w_mtx is RELEASED here, witness_dead is set so further
 * checking is abandoned, and NULL is returned -- callers must not
 * unlock again on the NULL path (see enroll()).
 */
static struct witness *
witness_get()
{
	struct witness *w;

	if ((w = w_free) == NULL) {
		witness_dead = 1;
		mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET);
		printf("witness exhausted\n");
		return (NULL);
	}
	w_free = w->w_next;
	bzero(w, sizeof(*w));
	return (w);
}

/*
 * Return a witness node to the free list.
 */
static void
witness_free(struct witness *w)
{
	w->w_next = w_free;
	w_free = w;
}

1445int 1446witness_list(struct proc *p) 1447{ 1448 struct mtx *m; 1449 int nheld; 1450 1451 KASSERT(!witness_cold, ("%s: witness_cold\n", __FUNCTION__)); 1452 nheld = 0; 1453 for ((m = LIST_FIRST(&p->p_heldmtx)); m != NULL; 1454 m = LIST_NEXT(m, mtx_held)) { 1455 printf("\t\"%s\" (%p) locked at %s:%d\n", 1456 m->mtx_description, m, 1457 m->mtx_witness->w_file, m->mtx_witness->w_line); 1458 nheld++; 1459 } 1460 1461 return (nheld); 1462} 1463 1464void 1465witness_save(struct mtx *m, const char **filep, int *linep) 1466{ 1467 1468 KASSERT(!witness_cold, ("%s: witness_cold\n", __FUNCTION__)); 1469 *filep = m->mtx_witness->w_file; 1470 *linep = m->mtx_witness->w_line; 1471} 1472 1473void 1474witness_restore(struct mtx *m, const char *file, int line) 1475{ 1476 1477 KASSERT(!witness_cold, ("%s: witness_cold\n", __FUNCTION__)); 1478 m->mtx_witness->w_file = file; 1479 m->mtx_witness->w_line = line; 1480} 1481 1482#endif /* WITNESS */ 1483