subr_turnstile.c revision 97156
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/subr_turnstile.c 97156 2002-05-23 03:08:42Z des $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
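
/*
 * Editorial sketch (not part of the original file): these macros assume
 * the mtx_lock word holds either MTX_UNOWNED or the owning thread pointer
 * with the MTX_RECURSED/MTX_CONTESTED flag bits OR'd into its low bits,
 * which MTX_FLAGMASK strips off again.  A reader of the lock word might
 * look like this (illustrative only):
 *
 *	struct thread *owner = mtx_owner(m);	(NULL when unowned)
 *	int contested = (m->mtx_lock & MTX_CONTESTED) != 0;
 */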

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri)	/* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			    || td->td_proc->p_stat == SZOMB
			    || td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
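
/*
 * Editorial worked example (not part of the original file), to make the
 * loop above concrete.  Priorities here are "lower value is higher
 * priority".  Suppose thread A (priority 40) blocks on mutex M1, owned
 * by B (priority 80), and B is itself blocked on M2, owned by C
 * (priority 90).  Entering with td = A:
 *
 *	pass 1: td = mtx_owner(M1) = B; 80 > 40, so B->td_priority = 40;
 *		B is in SMTX, so follow m = B->td_blocked = M2 and re-sort
 *		B within M2's mtx_blocked queue if needed;
 *	pass 2: td = mtx_owner(M2) = C; C->td_priority = 40; if C is
 *		runnable (SRUN) it is requeued at the lent priority and
 *		the loop returns.
 *
 * The threads, mutexes and numbers are hypothetical.
 */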

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char *name;
	const char *file;
	int line;
#define MPROF_MAX 0
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
	uintmax_t counter[4];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS 1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE 1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded"));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif
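
/*
 * Editorial usage note (not part of the original file): on a kernel built
 * with `options MUTEX_PROFILING', the knobs above are driven from
 * userland roughly as follows:
 *
 *	sysctl debug.mutex.prof.enable=1	# start recording holdtimes
 *	(run a workload)
 *	sysctl debug.mutex.prof.stats		# per file:line max/total/
 *						# count/average
 *
 * Times are printed in microseconds, since dump_mutex_prof_stats() above
 * divides the nanosecond counters by 1000.
 */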

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->acqtime == 0) {
		m->file = file;
		m->line = line;
		m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->acqtime;
		m->acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = file; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = line, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == line && strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = line;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
#if defined(SMP) || LOCK_DEBUG > 0
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
#if defined(SMP) || LOCK_DEBUG > 0
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
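
/*
 * Editorial usage sketch (not part of the original file): the public
 * mtx_trylock() macro that ends up here is for opportunistic,
 * non-blocking acquisition.  With a hypothetical `foo_mtx':
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		(do work; per the KASSERT above, the lock must not
 *		already have been owned, i.e. no recursion)
 *		mtx_unlock(&foo_mtx);
 *	}
 */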

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && owner->td_kse != NULL &&
		    owner->td_kse->ke_oncpu != NOCPU) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
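
/*
 * Editorial usage sketch (not part of the original file): the spin path
 * above backs the mtx_lock_spin()/mtx_unlock_spin() macros, used earlier
 * in this file for mprof_mtx and sched_lock.  With a hypothetical
 * `foo_mtx':
 *
 *	mtx_init(&foo_mtx, "foo spin", NULL, MTX_SPIN);
 *	mtx_lock_spin(&foo_mtx);
 *	(short, non-sleeping critical section)
 *	mtx_unlock_spin(&foo_mtx);
 */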

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
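
/*
 * Editorial usage sketch (not part of the original file): under
 * INVARIANTS, callers document their locking assumptions through the
 * mtx_assert() macro that lands in _mtx_assert() above, e.g. with a
 * hypothetical `foo_mtx':
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *	mtx_assert(&foo_mtx, MA_NOTOWNED);
 */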

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
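
/*
 * Editorial usage sketch (not part of the original file): a subsystem
 * typically initializes its mutex either directly or via the
 * MTX_SYSINIT() macro served by mtx_sysinit() above.  With a
 * hypothetical `foo_mtx':
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 *
 * or, from the subsystem's own init routine:
 *
 *	mtx_init(&foo_mtx, "foo lock", NULL, MTX_DEF | MTX_RECURSE);
 */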

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
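
/*
 * Editorial usage sketch (not part of the original file): per the comment
 * block above, the return value of mtx_lock_giant() must be handed back
 * to mtx_unlock_giant(), so Giant is only dropped if it was actually
 * taken:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	(access struct file / filedesc state)
 *	mtx_unlock_giant(s);
 */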