subr_turnstile.c revision 89392
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_turnstile.c 89392 2002-01-15 14:20:33Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}
		kg = td->td_ksegrp;

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (kg->kg_pri.pri_level <= pri)	/* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			    || td->td_proc->p_stat == SZOMB
			    || td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}
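/*
 * Usage sketch (editor's addition, not part of this revision): consumers do
 * not normally call the *_flags functions above directly; they use the
 * mtx_lock()/mtx_unlock() and mtx_lock_spin()/mtx_unlock_spin() macros from
 * <sys/mutex.h>, which expand to these functions when not inlined.  The lock
 * and counter names below are hypothetical.
 */
static struct mtx example_lock;		/* hypothetical sleep mutex */
static int example_count;

static void
example_bump_count(void)
{

	mtx_lock(&example_lock);	/* may block if the lock is contested */
	example_count++;
	mtx_unlock(&example_lock);
}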
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
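/*
 * Usage sketch (editor's addition, not part of this revision): mtx_trylock()
 * returns non-zero only when the lock was acquired, and must not be called
 * on a mutex the caller already owns, since recursion is not handled here.
 * The function and parameter names below are hypothetical.
 */
static int
example_try_update(struct mtx *lock, int *counter)
{

	if (!mtx_trylock(lock))
		return (0);		/* contested; let the caller retry */
	(*counter)++;
	mtx_unlock(lock);
	return (1);
}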
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release.  This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000)
				continue;
			if (i++ < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
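/*
 * Usage sketch (editor's addition, not part of this revision): code that
 * requires its caller to hold a particular mutex typically documents the
 * requirement with mtx_assert(), as done with sched_lock above.  Under
 * INVARIANTS the check panics when violated; otherwise it compiles away.
 * The names below are hypothetical.
 */
static void
example_locked_op(struct mtx *lock, int *counter)
{

	mtx_assert(lock, MA_OWNED);
	(*counter)++;
}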
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}
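/*
 * Usage sketch (editor's addition, not part of this revision): typical
 * lifecycle of a mutex with the mtx_init() signature above.  MTX_DEF is
 * assumed here to name the default (sleep) mutex type; the structure and
 * function names are hypothetical.
 */
static struct mtx example_sc_lock;

static void
example_attach(void)
{

	mtx_init(&example_sc_lock, "example softc lock", MTX_DEF);
}

static void
example_detach(void)
{

	/*
	 * mtx_destroy() accepts a mutex that is unowned, or owned by the
	 * caller as long as it is neither recursed nor contested.
	 */
	mtx_destroy(&example_sc_lock);
}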
/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
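/*
 * Usage sketch (editor's addition, not part of this revision): the calling
 * pattern described in the comment above.  The return value of
 * mtx_lock_giant() is handed back to mtx_unlock_giant(), so Giant is only
 * dropped if it was actually taken.  The wrapped operation is hypothetical.
 */
static void
example_proc_op(void)
{
	int gotgiant;

	gotgiant = mtx_lock_giant(kern_giant_proc);
	/* ... work on a subsystem that may still need Giant ... */
	mtx_unlock_giant(gotgiant);
}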