kern_mutex.c revision 158651
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 158651 2006-05-16 14:37:58Z phk $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#ifdef DDB
static void     db_show_mtx(struct lock_object *lock);
#endif
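
/*
 * Illustrative sketch of the lock word layout assumed by mtx_unowned()
 * and mtx_owner() above: a held mutex stores the owning thread pointer
 * in mtx_lock with the MTX_RECURSED/MTX_CONTESTED flag bits OR'd into
 * the low bits, so the owner is recovered by masking with MTX_FLAGMASK:
 *
 *        uintptr_t v = m->mtx_lock;
 *        struct thread *owner;
 *
 *        if (v == MTX_UNOWNED)
 *                owner = NULL;
 *        else
 *                owner = (struct thread *)(v & MTX_FLAGMASK);
 */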

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_holding;
        uintmax_t       cnt_contest_locking;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS       MPROF_BUFFERS
#else
#define NUM_MPROF_BUFFERS       1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define MPROF_HASH_SIZE         1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         256 * 400

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
            "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                    mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].cnt_contest_holding,
                    mprof_buf[i].cnt_contest_locking,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        if (first_free_mprof_buf == 0)
                return (0);

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);

        mtx_lock_spin(&mprof_mtx);
        bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
        bzero(mprof_hash, sizeof(struct mtx *) * MPROF_HASH_SIZE);
        first_free_mprof_buf = 0;
        mtx_unlock_spin(&mprof_mtx);
        return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif
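
/*
 * Example (userland): when the kernel is built with MUTEX_PROFILING,
 * the records above are driven through the sysctls defined in this
 * section:
 *
 *        sysctl debug.mutex.prof.enable=1      (start collecting hold times)
 *        sysctl debug.mutex.prof.stats         (dump the per-site table)
 *        sysctl debug.mutex.prof.reset=1       (discard all records)
 */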

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
                /*
                 * There's a small race, really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += m->mtx_contest_holding;
                m->mtx_contest_holding = 0;
                mpp->cnt_contest_locking += m->mtx_contest_locking;
                m->mtx_contest_locking = 0;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}
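
/*
 * Example use of the sleep mutex interfaces above; a minimal sketch in
 * which "mydev_softc" and its fields are hypothetical consumer code, not
 * part of this file:
 *
 *        struct mydev_softc {
 *                struct mtx      sc_mtx;
 *                int             sc_count;
 *        };
 *
 *        static void
 *        mydev_bump(struct mydev_softc *sc)
 *        {
 *
 *                mtx_lock(&sc->sc_mtx);
 *                mtx_assert(&sc->sc_mtx, MA_OWNED);
 *                sc->sc_count++;
 *                mtx_unlock(&sc->sc_mtx);
 *        }
 *
 * mtx_lock() and mtx_unlock() normally expand to the inlined __mtx_*
 * macros; the functions above are the out-of-line versions that modules
 * link against.
 */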

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
        _rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m'.  If this function is called on a mutex that
 * is already owned by the caller and marked recursable, it will recursively
 * acquire the lock; otherwise an already-owned lock causes the attempt to
 * fail.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}
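
/*
 * Example mtx_trylock() consumer pattern (a sketch; the softc and task
 * names are hypothetical).  Since a failed try never blocks, the caller
 * must be prepared to defer the work, e.g. to a taskqueue:
 *
 *        if (mtx_trylock(&sc->sc_mtx)) {
 *                mydev_flush(sc);
 *                mtx_unlock(&sc->sc_mtx);
 *        } else
 *                taskqueue_enqueue(taskqueue_swi, &sc->sc_flush_task);
 */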

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        volatile struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
        int contested;
#endif

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
        contested = 0;
#endif
        while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
                contested = 1;
                atomic_add_int(&m->mtx_contest_holding, 1);
#endif
                turnstile_lock(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#ifdef MUTEX_WAKE_ALL
                MPASS(v != MTX_CONTESTED);
#else
                /*
                 * The mutex was marked contested on release.  This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        m->mtx_lock = tid | MTX_CONTESTED;
                        turnstile_claim(&m->mtx_object);
                        break;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                if (TD_IS_RUNNING(owner)) {
#else
                if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
                        }
                        continue;
                }
#endif  /* SMP && !NO_ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(&m->mtx_object, mtx_owner(m),
                    TS_EXCLUSIVE_QUEUE);
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, (void *)tid, file, line);
        }
#endif
#ifdef MUTEX_PROFILING
        if (contested)
                m->mtx_contest_locking++;
        m->mtx_contest_holding = 0;
#endif
        return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, tid))
                        break;

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
                        else if (!kdb_active && !panicstr) {
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}
#endif /* SMP */

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
#ifndef PREEMPTION
        struct thread *td, *td1;
#endif

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        turnstile_lock(&m->mtx_object);
        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
        /* XXX */
        td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _release_lock_quick(m);
#else
        if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
#endif
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                    "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                mi_switch(SW_INVOL, NULL);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);
#endif

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL || dumping)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(*m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
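
/*
 * Example MTX_SYSINIT() use, matching how mprof_mtx is declared earlier
 * in this file; the macro records a struct mtx_args and arranges for
 * mtx_sysinit() to run during boot ("my_mtx" is a placeholder, not an
 * existing kernel lock):
 *
 *        static struct mtx my_mtx;
 *        MTX_SYSINIT(my_mtx_init, &my_mtx, "my mutex", MTX_DEF);
 */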

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name'.  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_class *class;
        int flags;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;

        /* Initialize mutex. */
        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
        m->mtx_acqtime = 0;
        m->mtx_filename = NULL;
        m->mtx_lineno = 0;
        m->mtx_contest_holding = 0;
        m->mtx_contest_locking = 0;
#endif

        lock_init(&m->mtx_object, class, name, type, flags);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
                        spinlock_exit();

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Set up turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
        struct thread *td;
        struct mtx *m;

        m = (struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->mtx_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->mtx_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif
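
/*
 * Example of the full mutex life cycle using the routines above (a
 * sketch; "mydev" and its softc are hypothetical):
 *
 *        static int
 *        mydev_attach(struct mydev_softc *sc)
 *        {
 *
 *                mtx_init(&sc->sc_mtx, "mydev", NULL, MTX_DEF);
 *                return (0);
 *        }
 *
 *        static int
 *        mydev_detach(struct mydev_softc *sc)
 *        {
 *
 *                mtx_destroy(&sc->sc_mtx);
 *                return (0);
 *        }
 *
 * mtx_destroy() tolerates a held sleep mutex, but never one that is
 * recursed or contested; the MPASS() in mtx_destroy() above enforces this.
 */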