subr_turnstile.c revision 102907
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_turnstile.c 102907 2002-09-03 18:25:16Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/* XXXKSE This test will change. */
#define	thread_running(td)						\
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
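
/*
 * Illustrative note (not part of the original file): the macros above rely
 * on the lock word layout used throughout this file.  mtx_lock is either
 * MTX_UNOWNED or the owning thread pointer with status bits (MTX_CONTESTED,
 * MTX_RECURSED) OR'd into its low bits, so masking with MTX_FLAGMASK strips
 * the status bits and leaves the thread pointer.  A minimal sketch:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & MTX_FLAGMASK);
 *	int contested = (v & MTX_CONTESTED) != 0;
 */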

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		KASSERT(td->td_state != TDS_SURPLUS, ("Mutex owner SURPLUS"));
		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_state != TDS_SLP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri)	/* lower is higher priority */
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (td->td_state == TDS_RUNNING) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this should
		 * never happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue, move to new run queue and quit.
		 * XXXKSE this gets a lot more complicated under threads,
		 * but try anyhow.
		 * We should have a special call to do this more efficiently.
		 */
		if (td->td_state == TDS_RUNQ) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			td->td_priority = pri;
			setrunqueue(td);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_state == TDS_MTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
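
/*
 * Illustrative note (not in the original file): propagate_priority() walks
 * the chain of ownership one link at a time.  If thread T1 blocks on mutex
 * M1 owned by T2, which is itself blocked on M2 owned by T3, then T1's
 * priority is lent first to T2 and then to T3, re-sorting each mutex's
 * blocked queue along the way, until it reaches an owner that is running
 * or sitting on a run queue.
 */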

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
#define	MPROF_MAX	0
#define	MPROF_TOT	1
#define	MPROF_CNT	2
#define	MPROF_AVG	3
	uintmax_t	counter[4];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define	NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define	MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");
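
/*
 * Descriptive note (not in the original file): profiling records are handed
 * out from mprof_buf in order and never freed.  Once all NUM_MPROF_BUFFERS
 * records are in use, new acquisition sites are dropped and counted in
 * mutex_prof_rejected rather than recorded.
 */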

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded"));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
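
/*
 * Usage sketch (illustrative, not in the original file): on a kernel built
 * with MUTEX_PROFILING, the statistics above can be enabled and dumped from
 * a shell via sysctl:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	sysctl debug.mutex.prof.stats
 *
 * The counters are kept in nanoseconds; the max/total/average columns are
 * divided by 1000 when printed, so they read in microseconds.
 */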

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}
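
/*
 * Usage sketch (illustrative, not in the original file; "example_mtx" and
 * example_count are hypothetical names).  Kernel code normally goes through
 * the mtx_lock()/mtx_unlock() macros rather than calling the function
 * versions above directly:
 *
 *	static struct mtx example_mtx;
 *	static int example_count;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	mtx_lock(&example_mtx);
 *	example_count++;		(critical section)
 *	mtx_unlock(&example_mtx);
 */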

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
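
/*
 * Usage sketch (illustrative, not in the original file; "example_mtx" is a
 * hypothetical lock).  Callers make the non-blocking attempt through the
 * mtx_trylock() macro and must not already own the lock:
 *
 *	if (mtx_trylock(&example_mtx)) {
 *		(do work that needs the lock)
 *		mtx_unlock(&example_mtx);
 *	} else {
 *		(take a fallback path instead of sleeping)
 *	}
 */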

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}
#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_state = TDS_MTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
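
/*
 * Descriptive note (not in the original file): the spin loop above escalates
 * in stages.  For the first 10,000,000 iterations it just pauses and
 * re-checks the lock word; after that each iteration adds a DELAY(1) to back
 * off; and once the iteration budget is exhausted the kernel panics (unless
 * the kernel debugger is active) on the assumption that the owner is never
 * going to release the lock.
 */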

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
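
/*
 * Usage sketch (illustrative, not in the original file): code that requires
 * a lock to be held (or not held) documents the invariant with mtx_assert();
 * the backing function above is only compiled under INVARIANT_SUPPORT:
 *
 *	mtx_assert(&Giant, MA_OWNED);
 *	mtx_assert(&sched_lock, MA_NOTOWNED);
 */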

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with name `name' and
 * the options contained in `opts.'  The optional lock type `type' is used
 * as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
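
/*
 * Usage sketch (illustrative, not in the original file; "example_mtx" is a
 * hypothetical lock).  A subsystem can either call mtx_init() explicitly
 * during its own initialization or have the mutex set up automatically at
 * boot via the MTX_SYSINIT() macro, which routes through mtx_sysinit()
 * above (this file uses the same pattern for mprof_mtx):
 *
 *	static struct mtx example_mtx;
 *	MTX_SYSINIT(example, &example_mtx, "example lock", MTX_DEF);
 */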

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
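
/*
 * Usage sketch (illustrative, not in the original file): a subsystem wraps
 * a possibly-Giant-protected call by saving mtx_lock_giant()'s return value
 * and handing it back to mtx_unlock_giant(), so the unlock matches whatever
 * the sysctl decided at lock time:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	(operate on the process, possibly under Giant)
 *	mtx_unlock_giant(s);
 */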