/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 103216 2002-09-11 08:13:56Z julian $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/* XXXKSE This test will change. */
#define	thread_running(td)						\
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;
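
/*
 * Illustrative note (a sketch, not part of the original interfaces):
 * mtx_lock holds either MTX_UNOWNED or the owning thread pointer, with
 * the low bits doubling as the MTX_RECURSED and MTX_CONTESTED flags;
 * thread structures are aligned so those bits are otherwise zero.
 * Thus, roughly:
 *
 *	owner = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
 *	contested = (m->mtx_lock & MTX_CONTESTED) != 0;
 *
 * which is exactly what the mtx_owner() macro above computes.
 */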

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen, however, as it would mean we are deadlocked.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 * We should have a special call to do this more efficiently.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			td->td_priority = pri;
			setrunqueue(td);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(TD_ON_MUTEX(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
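
/*
 * Worked example (illustrative only): if thread A (td_priority 80)
 * owns mutex M and thread B (td_priority 40; a lower value means a
 * higher priority) blocks on M, propagate_priority() lends B's
 * priority to A until A releases M, re-sorting A on its run queue or
 * on the blocked chain of whatever mutex A itself is waiting for.
 * In sketch form, the chain walk above amounts to:
 *
 *	m = td->td_blocked;
 *	while ((owner = mtx_owner(m)) != NULL &&
 *	    owner->td_priority > td->td_priority) {
 *		owner->td_priority = td->td_priority;
 *		if (!TD_ON_MUTEX(owner))
 *			break;
 *		m = owner->td_blocked;
 *	}
 *
 * This elides the run-queue and blocked-chain re-sorting, and the
 * contested-but-unowned case, which the real loop handles.
 */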

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
#define	MPROF_MAX	0
#define	MPROF_TOT	1
#define	MPROF_CNT	2
#define	MPROF_AVG	3
	uintmax_t	counter[4];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define	NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define	MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
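
/*
 * Example (illustrative): on a MUTEX_PROFILING kernel the profiler is
 * driven entirely through sysctl.  Holdtimes are kept in nanoseconds
 * and reported in microseconds (note the / 1000 above):
 *
 *	# sysctl debug.mutex.prof.enable=1
 *	# sysctl debug.mutex.prof.stats
 *	         max        total        count      average name
 *	          42        12345         1000           12 foo.c:99 (foo lock)
 *
 * The "foo.c:99 (foo lock)" row is a made-up sample; real rows show
 * the file and line of the mtx_lock() call site and the mutex's name.
 */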

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}
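
/*
 * Example usage (an illustrative sketch; the "foo" names are
 * hypothetical): from a module, mtx_lock() and mtx_unlock() resolve to
 * the _mtx_*_flags() functions above.  Acquiring a sleep mutex may
 * block (switch out), so it must not be done from a context that
 * cannot block; spin mutexes busy-wait inside a critical section and
 * suit short, non-sleeping sections such as interrupt paths.
 */
#if 0
static struct mtx foo_mtx;		/* hypothetical MTX_DEF mutex */
static struct mtx foo_spin_mtx;		/* hypothetical MTX_SPIN mutex */

static void
foo_example(void)
{

	mtx_lock(&foo_mtx);
	/* ... touch data protected by foo_mtx ... */
	mtx_unlock(&foo_mtx);

	mtx_lock_spin(&foo_spin_mtx);
	/* ... short critical section; no blocking allowed ... */
	mtx_unlock_spin(&foo_spin_mtx);
}
#endif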

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we
 * assume that if we're called, it's because we know we don't already
 * own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
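
/*
 * Example (illustrative sketch; foo_mtx is hypothetical): mtx_trylock()
 * is the non-blocking entry point built on _mtx_trylock() above, useful
 * when blocking is unacceptable or would risk a lock-order reversal:
 */
#if 0
static void
foo_poll(void)
{

	if (mtx_trylock(&foo_mtx)) {
		/* Acquired without blocking; do the work. */
		mtx_unlock(&foo_mtx);
	} else {
		/* Contended; back off, or take locks in the safe order. */
	}
}
#endif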

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}
#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		TD_SET_MUTEX(td);
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	TD_CLR_MUTEX(td1);
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with name `name' and
 * options contained in `opts'.  The optional lock type `type' is used as
 * a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
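
/*
 * Example (illustrative; the "foo" names are hypothetical): a mutex
 * initialized at SYSINIT time via the MTX_SYSINIT() glue above, with
 * callers documenting their locking assumptions through mtx_assert():
 */
#if 0
static struct mtx foo_mtx;
MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);

static void
foo_modify(void)
{

	mtx_assert(&foo_mtx, MA_OWNED);	/* caller must hold foo_mtx */
	/* ... modify state protected by foo_mtx ... */
}
#endif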

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL,
    "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{

	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{

	if (s)
		mtx_unlock(&Giant);
}
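
/*
 * Example (illustrative; foo_syscall_wrapper is hypothetical): a caller
 * wrapping a proc-locked path with Giant under control of the
 * kern.giant.proc sysctl, passing the token from mtx_lock_giant() to
 * mtx_unlock_giant() as described above:
 */
#if 0
static int
foo_syscall_wrapper(void)
{
	int gotgiant;

	gotgiant = mtx_lock_giant(kern_giant_proc);
	/* ... subsystem code that may still need Giant ... */
	mtx_unlock_giant(gotgiant);
	return (0);
}
#endif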