subr_turnstile.c revision 93813
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_turnstile.c 93813 2002-04-04 20:52:27Z jhb $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
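
/*
 * Informal sketch (an illustration, not part of the lock interface): the
 * mtx_lock word holds either MTX_UNOWNED, MTX_CONTESTED, or the owning
 * thread pointer with flag bits OR'd into its low bits, which is why
 * mtx_owner() above recovers the owner by masking:
 *
 *	struct thread *owner;
 *
 *	owner = (struct thread *)((m)->mtx_lock & MTX_FLAGMASK);
 */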

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void     propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
        int pri = td->td_priority;
        struct mtx *m = td->td_blocked;

        mtx_assert(&sched_lock, MA_OWNED);
        for (;;) {
                struct thread *td1;

                td = mtx_owner(m);

                if (td == NULL) {
                        /*
                         * This really isn't quite right. Really
                         * ought to bump priority of thread that
                         * next acquires the mutex.
                         */
                        MPASS(m->mtx_lock == MTX_CONTESTED);
                        return;
                }

                MPASS(td->td_proc->p_magic == P_MAGIC);
                KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
                if (td->td_priority <= pri) /* lower is higher priority */
                        return;

                /*
                 * Bump this thread's priority.
                 */
                td->td_priority = pri;

                /*
                 * If lock holder is actually running, just bump priority.
                 */
                /* XXXKSE this test is not sufficient */
                if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
                        MPASS(td->td_proc->p_stat == SRUN
                            || td->td_proc->p_stat == SZOMB
                            || td->td_proc->p_stat == SSTOP);
                        return;
                }

#ifndef SMP
                /*
                 * For UP, we check to see if td is curthread (this shouldn't
                 * ever happen however as it would mean we are in a deadlock.)
                 */
                KASSERT(td != curthread, ("Deadlock detected"));
#endif

                /*
                 * If on run queue move to new run queue, and quit.
                 * XXXKSE this gets a lot more complicated under threads
                 * but try anyhow.
                 */
                if (td->td_proc->p_stat == SRUN) {
                        MPASS(td->td_blocked == NULL);
                        remrunqueue(td);
                        setrunqueue(td);
                        return;
                }

                /*
                 * If we aren't blocked on a mutex, we should be.
                 */
                KASSERT(td->td_proc->p_stat == SMTX, (
                    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
                    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
                    m->mtx_object.lo_name));

                /*
                 * Pick up the mutex that td is blocked on.
                 */
                m = td->td_blocked;
                MPASS(m != NULL);

                /*
                 * Check if the thread needs to be moved up on
                 * the blocked chain.
                 */
                if (td == TAILQ_FIRST(&m->mtx_blocked)) {
                        continue;
                }

                td1 = TAILQ_PREV(td, threadqueue, td_blkq);
                if (td1->td_priority <= pri) {
                        continue;
                }

                /*
                 * Remove thread from blocked chain and determine where
                 * it should be moved up to.  Since we know that td1 has
                 * a lower priority than td, we know that at least one
                 * thread in the chain has a lower priority and that
                 * td1 will thus not be NULL after the loop.
                 */
                TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
                TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (td1->td_priority > pri)
                                break;
                }

                MPASS(td1 != NULL);
                TAILQ_INSERT_BEFORE(td1, td, td_blkq);
                CTR4(KTR_LOCK,
                    "propagate_priority: p %p moved before %p on [%p] %s",
                    td, td1, m, m->mtx_object.lo_name);
        }
}
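
/*
 * Worked example (informal): suppose thread A at priority 80 blocks on a
 * mutex owned by thread B at priority 120 (smaller values are higher
 * priority).  propagate_priority() lends A's priority to B so B can run
 * and release the lock sooner.  If B is itself blocked on a second mutex
 * owned by thread C, the loop above walks that chain and bumps C to 80 as
 * well, re-sorting each mtx_blocked queue so it stays ordered by priority.
 */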

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char *name;
        const char *file;
        int line;
#define MPROF_MAX 0
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
        u_int64_t counter[4];
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS 1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE 1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;

        if (first_free_mprof_buf == 0)
                return SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded"));

        sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
        sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
            "max", "total", "count", "average", "name");
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i)
                sbuf_printf(sb, "%12llu %12llu %12llu %12llu %s:%d (%s)\n",
                    mprof_buf[i].counter[MPROF_MAX] / 1000,
                    mprof_buf[i].counter[MPROF_TOT] / 1000,
                    mprof_buf[i].counter[MPROF_CNT],
                    mprof_buf[i].counter[MPROF_AVG] / 1000,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
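
/*
 * Usage sketch (informal, assuming a kernel built with MUTEX_PROFILING):
 * the profiler is driven entirely through the sysctls declared above,
 * e.g. from userland:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	(run the workload of interest)
 *	sysctl debug.mutex.prof.stats
 *
 * dump_mutex_prof_stats() then renders one line per mutex_prof record,
 * with the max/total/average hold times scaled down to microseconds.
 */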

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->acqtime == 0) {
                m->file = file;
                m->line = line;
                m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->acqtime;
                m->acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = file; strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = line, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == line && strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = line;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if ((now - acqtime) > mpp->counter[MPROF_MAX])
                        mpp->counter[MPROF_MAX] = now - acqtime;
                mpp->counter[MPROF_TOT] += now - acqtime;
                mpp->counter[MPROF_CNT] += 1;
                mpp->counter[MPROF_AVG] =
                    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        mtx_assert(m, MA_OWNED);
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        _rel_spin_lock(m);
}
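
/*
 * Usage sketch (informal; "foo_mtx" and "foo_count" are hypothetical):
 * the functions above back the mtx_lock()/mtx_unlock() macros, so a
 * typical consumer never calls them directly:
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 */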
419 */ 420int 421_mtx_trylock(struct mtx *m, int opts, const char *file, int line) 422{ 423 int rval; 424 425 MPASS(curthread != NULL); 426 427 rval = _obtain_lock(m, curthread); 428 429 LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line); 430 if (rval) { 431 /* 432 * We do not handle recursion in _mtx_trylock; see the 433 * note at the top of the routine. 434 */ 435 KASSERT(!mtx_recursed(m), 436 ("mtx_trylock() called on a recursed mutex")); 437 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, 438 file, line); 439 } 440 441 return (rval); 442} 443 444/* 445 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. 446 * 447 * We call this if the lock is either contested (i.e. we need to go to 448 * sleep waiting for it), or if we need to recurse on it. 449 */ 450void 451_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) 452{ 453 struct thread *td = curthread; 454 455 if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) { 456 m->mtx_recurse++; 457 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); 458 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 459 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); 460 return; 461 } 462 463 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 464 CTR4(KTR_LOCK, 465 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", 466 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line); 467 468 while (!_obtain_lock(m, td)) { 469 uintptr_t v; 470 struct thread *td1; 471 472 mtx_lock_spin(&sched_lock); 473 /* 474 * Check if the lock has been released while spinning for 475 * the sched_lock. 476 */ 477 if ((v = m->mtx_lock) == MTX_UNOWNED) { 478 mtx_unlock_spin(&sched_lock); 479 continue; 480 } 481 482 /* 483 * The mutex was marked contested on release. This means that 484 * there are threads blocked on it. 485 */ 486 if (v == MTX_CONTESTED) { 487 td1 = TAILQ_FIRST(&m->mtx_blocked); 488 MPASS(td1 != NULL); 489 m->mtx_lock = (uintptr_t)td | MTX_CONTESTED; 490 491 if (td1->td_priority < td->td_priority) 492 td->td_priority = td1->td_priority; 493 mtx_unlock_spin(&sched_lock); 494 return; 495 } 496 497 /* 498 * If the mutex isn't already contested and a failure occurs 499 * setting the contested bit, the mutex was either released 500 * or the state of the MTX_RECURSED bit changed. 501 */ 502 if ((v & MTX_CONTESTED) == 0 && 503 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, 504 (void *)(v | MTX_CONTESTED))) { 505 mtx_unlock_spin(&sched_lock); 506 continue; 507 } 508 509 /* 510 * We definitely must sleep for this lock. 511 */ 512 mtx_assert(m, MA_NOTOWNED); 513 514#ifdef notyet 515 /* 516 * If we're borrowing an interrupted thread's VM context, we 517 * must clean up before going to sleep. 518 */ 519 if (td->td_ithd != NULL) { 520 struct ithd *it = td->td_ithd; 521 522 if (it->it_interrupted) { 523 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 524 CTR2(KTR_LOCK, 525 "_mtx_lock_sleep: %p interrupted %p", 526 it, it->it_interrupted); 527 intr_thd_fixup(it); 528 } 529 } 530#endif 531 532 /* 533 * Put us on the list of threads blocked on this mutex. 534 */ 535 if (TAILQ_EMPTY(&m->mtx_blocked)) { 536 td1 = mtx_owner(m); 537 LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested); 538 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq); 539 } else { 540 TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) 541 if (td1->td_priority > td->td_priority) 542 break; 543 if (td1) 544 TAILQ_INSERT_BEFORE(td1, td, td_blkq); 545 else 546 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq); 547 } 548 549 /* 550 * Save who we're blocked on. 
551 */ 552 td->td_blocked = m; 553 td->td_mtxname = m->mtx_object.lo_name; 554 td->td_proc->p_stat = SMTX; 555 propagate_priority(td); 556 557 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 558 CTR3(KTR_LOCK, 559 "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m, 560 m->mtx_object.lo_name); 561 562 td->td_proc->p_stats->p_ru.ru_nvcsw++; 563 mi_switch(); 564 565 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 566 CTR3(KTR_LOCK, 567 "_mtx_lock_sleep: p %p free from blocked on [%p] %s", 568 td, m, m->mtx_object.lo_name); 569 570 mtx_unlock_spin(&sched_lock); 571 } 572 573 return; 574} 575 576/* 577 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock. 578 * 579 * This is only called if we need to actually spin for the lock. Recursion 580 * is handled inline. 581 */ 582void 583_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line) 584{ 585 int i = 0; 586 587 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 588 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); 589 590 for (;;) { 591 if (_obtain_lock(m, curthread)) 592 break; 593 594 /* Give interrupts a chance while we spin. */ 595 critical_exit(); 596 while (m->mtx_lock != MTX_UNOWNED) { 597 if (i++ < 10000000) 598 continue; 599 if (i++ < 60000000) 600 DELAY(1); 601#ifdef DDB 602 else if (!db_active) 603#else 604 else 605#endif 606 panic("spin lock %s held by %p for > 5 seconds", 607 m->mtx_object.lo_name, (void *)m->mtx_lock); 608 } 609 critical_enter(); 610 } 611 612 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 613 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); 614 615 return; 616} 617 618/* 619 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. 620 * 621 * We are only called here if the lock is recursed or contested (i.e. we 622 * need to wake up a blocked thread). 623 */ 624void 625_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) 626{ 627 struct thread *td, *td1; 628 struct mtx *m1; 629 int pri; 630 631 td = curthread; 632 633 if (mtx_recursed(m)) { 634 if (--(m->mtx_recurse) == 0) 635 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); 636 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 637 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); 638 return; 639 } 640 641 mtx_lock_spin(&sched_lock); 642 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 643 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); 644 645 td1 = TAILQ_FIRST(&m->mtx_blocked); 646 MPASS(td->td_proc->p_magic == P_MAGIC); 647 MPASS(td1->td_proc->p_magic == P_MAGIC); 648 649 TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq); 650 651 if (TAILQ_EMPTY(&m->mtx_blocked)) { 652 LIST_REMOVE(m, mtx_contested); 653 _release_lock_quick(m); 654 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 655 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m); 656 } else 657 atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED); 658 659 pri = PRI_MAX; 660 LIST_FOREACH(m1, &td->td_contested, mtx_contested) { 661 int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority; 662 if (cp < pri) 663 pri = cp; 664 } 665 666 if (pri > td->td_base_pri) 667 pri = td->td_base_pri; 668 td->td_priority = pri; 669 670 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 671 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p", 672 m, td1); 673 674 td1->td_blocked = NULL; 675 td1->td_proc->p_stat = SRUN; 676 setrunqueue(td1); 677 678 if (td->td_critnest == 1 && td1->td_priority < pri) { 679#ifdef notyet 680 if (td->td_ithd != NULL) { 681 struct ithd *it = td->td_ithd; 682 683 if (it->it_interrupted) { 684 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 685 CTR2(KTR_LOCK, 686 "_mtx_unlock_sleep: %p interrupted 

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct thread *td, *td1;
        struct mtx *m1;
        int pri;

        td = curthread;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        mtx_lock_spin(&sched_lock);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

        td1 = TAILQ_FIRST(&m->mtx_blocked);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        MPASS(td1->td_proc->p_magic == P_MAGIC);

        TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

        if (TAILQ_EMPTY(&m->mtx_blocked)) {
                LIST_REMOVE(m, mtx_contested);
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else
                atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

        pri = PRI_MAX;
        LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
                int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
                if (cp < pri)
                        pri = cp;
        }

        if (pri > td->td_base_pri)
                pri = td->td_base_pri;
        td->td_priority = pri;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
                    m, td1);

        td1->td_blocked = NULL;
        td1->td_proc->p_stat = SRUN;
        setrunqueue(td1);

        if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                setrunqueue(td);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                td->td_proc->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }

        mtx_unlock_spin(&sched_lock);

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif
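
/*
 * Usage sketch (informal): INVARIANTS kernels use mtx_assert() to document
 * and enforce locking protocols; a routine that requires its caller to
 * hold the sched_lock might begin with
 *
 *	mtx_assert(&sched_lock, MA_OWNED);
 *
 * as propagate_priority() does above.
 */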

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_object *lock;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        lock = &m->mtx_object;
        KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
            ("mutex %s %p already initialized", name, m));
        bzero(m, sizeof(*m));
        if (opts & MTX_SPIN)
                lock->lo_class = &lock_class_mtx_spin;
        else
                lock->lo_class = &lock_class_mtx_sleep;
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;
        if (opts & MTX_QUIET)
                lock->lo_flags = LO_QUIET;
        if (opts & MTX_RECURSE)
                lock->lo_flags |= LO_RECURSABLE;
        if (opts & MTX_SLEEPABLE)
                lock->lo_flags |= LO_SLEEPABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                lock->lo_flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                lock->lo_flags |= LO_DUPOK;

        m->mtx_lock = MTX_UNOWNED;
        TAILQ_INIT(&m->mtx_blocked);

        LOCK_LOG_INIT(lock, opts);

        WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        LOCK_LOG_DESTROY(&m->mtx_object, 0);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

        /* Setup thread0 so that mutexes work. */
        LIST_INIT(&thread0.td_contested);

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;        /* Giant around PROC locks */
int kern_giant_file = 1;        /* Giant around struct file & filedesc */
int kern_giant_ucred = 1;       /* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
        if (sysctlvar || kern_giant_all) {
                mtx_lock(&Giant);
                return (1);
        }
        return (0);
}

void
mtx_unlock_giant(int s)
{
        if (s)
                mtx_unlock(&Giant);
}
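
/*
 * Usage sketch (informal; "kern_giant_foo" is a hypothetical sysctl
 * variable): the pair above brackets a subsystem call that may or may not
 * still need Giant, with the return value deciding the matching unlock:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_foo);
 *	...	(subsystem call)
 *	mtx_unlock_giant(s);
 */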