subr_turnstile.c revision 97079
1/*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 * $FreeBSD: head/sys/kern/subr_turnstile.c 97079 2002-05-21 20:34:28Z jhb $ 31 */ 32 33/* 34 * Machine independent bits of mutex implementation. 35 */ 36 37#include "opt_ddb.h" 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/bus.h> 42#include <sys/kernel.h> 43#include <sys/ktr.h> 44#include <sys/lock.h> 45#include <sys/malloc.h> 46#include <sys/mutex.h> 47#include <sys/proc.h> 48#include <sys/resourcevar.h> 49#include <sys/sbuf.h> 50#include <sys/sysctl.h> 51#include <sys/vmmeter.h> 52 53#include <machine/atomic.h> 54#include <machine/bus.h> 55#include <machine/clock.h> 56#include <machine/cpu.h> 57 58#include <ddb/ddb.h> 59 60#include <vm/vm.h> 61#include <vm/vm_extern.h> 62 63/* 64 * Internal utility macros. 65 */ 66#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) 67 68#define mtx_owner(m) (mtx_unowned((m)) ? NULL \ 69 : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK)) 70 71/* 72 * Lock classes for sleep and spin mutexes. 73 */ 74struct lock_class lock_class_mtx_sleep = { 75 "sleep mutex", 76 LC_SLEEPLOCK | LC_RECURSABLE 77}; 78struct lock_class lock_class_mtx_spin = { 79 "spin mutex", 80 LC_SPINLOCK | LC_RECURSABLE 81}; 82 83/* 84 * System-wide mutexes 85 */ 86struct mtx sched_lock; 87struct mtx Giant; 88 89/* 90 * Prototypes for non-exported routines. 91 */ 92static void propagate_priority(struct thread *); 93 94static void 95propagate_priority(struct thread *td) 96{ 97 int pri = td->td_priority; 98 struct mtx *m = td->td_blocked; 99 100 mtx_assert(&sched_lock, MA_OWNED); 101 for (;;) { 102 struct thread *td1; 103 104 td = mtx_owner(m); 105 106 if (td == NULL) { 107 /* 108 * This really isn't quite right. Really 109 * ought to bump priority of thread that 110 * next acquires the mutex. 111 */ 112 MPASS(m->mtx_lock == MTX_CONTESTED); 113 return; 114 } 115 116 MPASS(td->td_proc->p_magic == P_MAGIC); 117 KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex")); 118 if (td->td_priority <= pri) /* lower is higher priority */ 119 return; 120 121 /* 122 * Bump this thread's priority. 123 */ 124 td->td_priority = pri; 125 126 /* 127 * If lock holder is actually running, just bump priority. 128 */ 129 /* XXXKSE this test is not sufficient */ 130 if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) { 131 MPASS(td->td_proc->p_stat == SRUN 132 || td->td_proc->p_stat == SZOMB 133 || td->td_proc->p_stat == SSTOP); 134 return; 135 } 136 137#ifndef SMP 138 /* 139 * For UP, we check to see if td is curthread (this shouldn't 140 * ever happen however as it would mean we are in a deadlock.) 141 */ 142 KASSERT(td != curthread, ("Deadlock detected")); 143#endif 144 145 /* 146 * If on run queue move to new run queue, and quit. 147 * XXXKSE this gets a lot more complicated under threads 148 * but try anyhow. 149 */ 150 if (td->td_proc->p_stat == SRUN) { 151 MPASS(td->td_blocked == NULL); 152 remrunqueue(td); 153 setrunqueue(td); 154 return; 155 } 156 157 /* 158 * If we aren't blocked on a mutex, we should be. 159 */ 160 KASSERT(td->td_proc->p_stat == SMTX, ( 161 "process %d(%s):%d holds %s but isn't blocked on a mutex\n", 162 td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat, 163 m->mtx_object.lo_name)); 164 165 /* 166 * Pick up the mutex that td is blocked on. 167 */ 168 m = td->td_blocked; 169 MPASS(m != NULL); 170 171 /* 172 * Check if the thread needs to be moved up on 173 * the blocked chain 174 */ 175 if (td == TAILQ_FIRST(&m->mtx_blocked)) { 176 continue; 177 } 178 179 td1 = TAILQ_PREV(td, threadqueue, td_blkq); 180 if (td1->td_priority <= pri) { 181 continue; 182 } 183 184 /* 185 * Remove thread from blocked chain and determine where 186 * it should be moved up to. Since we know that td1 has 187 * a lower priority than td, we know that at least one 188 * thread in the chain has a lower priority and that 189 * td1 will thus not be NULL after the loop. 190 */ 191 TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq); 192 TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) { 193 MPASS(td1->td_proc->p_magic == P_MAGIC); 194 if (td1->td_priority > pri) 195 break; 196 } 197 198 MPASS(td1 != NULL); 199 TAILQ_INSERT_BEFORE(td1, td, td_blkq); 200 CTR4(KTR_LOCK, 201 "propagate_priority: p %p moved before %p on [%p] %s", 202 td, td1, m, m->mtx_object.lo_name); 203 } 204} 205 206#ifdef MUTEX_PROFILING 207SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging"); 208SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling"); 209static int mutex_prof_enable = 0; 210SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW, 211 &mutex_prof_enable, 0, "Enable tracing of mutex holdtime"); 212 213struct mutex_prof { 214 const char *name; 215 const char *file; 216 int line; 217#define MPROF_MAX 0 218#define MPROF_TOT 1 219#define MPROF_CNT 2 220#define MPROF_AVG 3 221 u_int64_t counter[4]; 222 struct mutex_prof *next; 223}; 224 225/* 226 * mprof_buf is a static pool of profiling records to avoid possible 227 * reentrance of the memory allocation functions. 228 * 229 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE. 230 */ 231#define NUM_MPROF_BUFFERS 1000 232static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS]; 233static int first_free_mprof_buf; 234#define MPROF_HASH_SIZE 1009 235static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE]; 236 237static int mutex_prof_acquisitions; 238SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD, 239 &mutex_prof_acquisitions, 0, "Number of mutex acquistions recorded"); 240static int mutex_prof_records; 241SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD, 242 &mutex_prof_records, 0, "Number of profiling records"); 243static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS; 244SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD, 245 &mutex_prof_maxrecords, 0, "Maximum number of profiling records"); 246static int mutex_prof_rejected; 247SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD, 248 &mutex_prof_rejected, 0, "Number of rejected profiling records"); 249static int mutex_prof_hashsize = MPROF_HASH_SIZE; 250SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD, 251 &mutex_prof_hashsize, 0, "Hash size"); 252static int mutex_prof_collisions = 0; 253SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD, 254 &mutex_prof_collisions, 0, "Number of hash collisions"); 255 256/* 257 * mprof_mtx protects the profiling buffers and the hash. 258 */ 259static struct mtx mprof_mtx; 260MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET); 261 262static u_int64_t 263nanoseconds(void) 264{ 265 struct timespec tv; 266 267 nanotime(&tv); 268 return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec); 269} 270 271static int 272dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS) 273{ 274 struct sbuf *sb; 275 int error, i; 276 277 if (first_free_mprof_buf == 0) 278 return SYSCTL_OUT(req, "No locking recorded", 279 sizeof("No locking recorded")); 280 281 sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND); 282 sbuf_printf(sb, "%12s %12s %12s %12s %s\n", 283 "max", "total", "count", "average", "name"); 284 mtx_lock_spin(&mprof_mtx); 285 for (i = 0; i < first_free_mprof_buf; ++i) 286 sbuf_printf(sb, "%12llu %12llu %12llu %12llu %s:%d (%s)\n", 287 mprof_buf[i].counter[MPROF_MAX] / 1000, 288 mprof_buf[i].counter[MPROF_TOT] / 1000, 289 mprof_buf[i].counter[MPROF_CNT], 290 mprof_buf[i].counter[MPROF_AVG] / 1000, 291 mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name); 292 mtx_unlock_spin(&mprof_mtx); 293 sbuf_finish(sb); 294 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1); 295 sbuf_delete(sb); 296 return (error); 297} 298SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD, 299 NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics"); 300#endif 301 302/* 303 * Function versions of the inlined __mtx_* macros. These are used by 304 * modules and can also be called from assembly language if needed. 305 */ 306void 307_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line) 308{ 309 310 MPASS(curthread != NULL); 311 _get_sleep_lock(m, curthread, opts, file, line); 312 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file, 313 line); 314 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line); 315#ifdef MUTEX_PROFILING 316 /* don't reset the timer when/if recursing */ 317 if (m->acqtime == 0) { 318 m->file = file; 319 m->line = line; 320 m->acqtime = mutex_prof_enable ? nanoseconds() : 0; 321 ++mutex_prof_acquisitions; 322 } 323#endif 324} 325 326void 327_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line) 328{ 329 330 MPASS(curthread != NULL); 331 mtx_assert(m, MA_OWNED); 332#ifdef MUTEX_PROFILING 333 if (m->acqtime != 0) { 334 static const char *unknown = "(unknown)"; 335 struct mutex_prof *mpp; 336 u_int64_t acqtime, now; 337 const char *p, *q; 338 volatile u_int hash; 339 340 now = nanoseconds(); 341 acqtime = m->acqtime; 342 m->acqtime = 0; 343 if (now <= acqtime) 344 goto out; 345 for (p = file; strncmp(p, "../", 3) == 0; p += 3) 346 /* nothing */ ; 347 if (p == NULL || *p == '\0') 348 p = unknown; 349 for (hash = line, q = p; *q != '\0'; ++q) 350 hash = (hash * 2 + *q) % MPROF_HASH_SIZE; 351 mtx_lock_spin(&mprof_mtx); 352 for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next) 353 if (mpp->line == line && strcmp(mpp->file, p) == 0) 354 break; 355 if (mpp == NULL) { 356 /* Just exit if we cannot get a trace buffer */ 357 if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) { 358 ++mutex_prof_rejected; 359 goto unlock; 360 } 361 mpp = &mprof_buf[first_free_mprof_buf++]; 362 mpp->name = mtx_name(m); 363 mpp->file = p; 364 mpp->line = line; 365 mpp->next = mprof_hash[hash]; 366 if (mprof_hash[hash] != NULL) 367 ++mutex_prof_collisions; 368 mprof_hash[hash] = mpp; 369 ++mutex_prof_records; 370 } 371 /* 372 * Record if the mutex has been held longer now than ever 373 * before 374 */ 375 if ((now - acqtime) > mpp->counter[MPROF_MAX]) 376 mpp->counter[MPROF_MAX] = now - acqtime; 377 mpp->counter[MPROF_TOT] += now - acqtime; 378 mpp->counter[MPROF_CNT] += 1; 379 mpp->counter[MPROF_AVG] = 380 mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT]; 381unlock: 382 mtx_unlock_spin(&mprof_mtx); 383 } 384out: 385#endif 386 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line); 387 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file, 388 line); 389 _rel_sleep_lock(m, curthread, opts, file, line); 390} 391 392void 393_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line) 394{ 395 396 MPASS(curthread != NULL); 397#if defined(SMP) || LOCK_DEBUG > 0 398 _get_spin_lock(m, curthread, opts, file, line); 399#else 400 critical_enter(); 401#endif 402 LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file, 403 line); 404 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line); 405} 406 407void 408_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line) 409{ 410 411 MPASS(curthread != NULL); 412 mtx_assert(m, MA_OWNED); 413 WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line); 414 LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file, 415 line); 416#if defined(SMP) || LOCK_DEBUG > 0 417 _rel_spin_lock(m); 418#else 419 critical_exit(); 420#endif 421} 422 423/* 424 * The important part of mtx_trylock{,_flags}() 425 * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that 426 * if we're called, it's because we know we don't already own this lock. 427 */ 428int 429_mtx_trylock(struct mtx *m, int opts, const char *file, int line) 430{ 431 int rval; 432 433 MPASS(curthread != NULL); 434 435 rval = _obtain_lock(m, curthread); 436 437 LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line); 438 if (rval) { 439 /* 440 * We do not handle recursion in _mtx_trylock; see the 441 * note at the top of the routine. 442 */ 443 KASSERT(!mtx_recursed(m), 444 ("mtx_trylock() called on a recursed mutex")); 445 WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, 446 file, line); 447 } 448 449 return (rval); 450} 451 452/* 453 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. 454 * 455 * We call this if the lock is either contested (i.e. we need to go to 456 * sleep waiting for it), or if we need to recurse on it. 457 */ 458void 459_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) 460{ 461 struct thread *td = curthread; 462 463 if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) { 464 m->mtx_recurse++; 465 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); 466 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 467 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); 468 return; 469 } 470 471 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 472 CTR4(KTR_LOCK, 473 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", 474 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line); 475 476 while (!_obtain_lock(m, td)) { 477 uintptr_t v; 478 struct thread *td1; 479 480 mtx_lock_spin(&sched_lock); 481 /* 482 * Check if the lock has been released while spinning for 483 * the sched_lock. 484 */ 485 if ((v = m->mtx_lock) == MTX_UNOWNED) { 486 mtx_unlock_spin(&sched_lock); 487 continue; 488 } 489 490 /* 491 * The mutex was marked contested on release. This means that 492 * there are threads blocked on it. 493 */ 494 if (v == MTX_CONTESTED) { 495 td1 = TAILQ_FIRST(&m->mtx_blocked); 496 MPASS(td1 != NULL); 497 m->mtx_lock = (uintptr_t)td | MTX_CONTESTED; 498 499 if (td1->td_priority < td->td_priority) 500 td->td_priority = td1->td_priority; 501 mtx_unlock_spin(&sched_lock); 502 return; 503 } 504 505 /* 506 * If the mutex isn't already contested and a failure occurs 507 * setting the contested bit, the mutex was either released 508 * or the state of the MTX_RECURSED bit changed. 509 */ 510 if ((v & MTX_CONTESTED) == 0 && 511 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, 512 (void *)(v | MTX_CONTESTED))) { 513 mtx_unlock_spin(&sched_lock); 514 continue; 515 } 516 517 /* 518 * We definitely must sleep for this lock. 519 */ 520 mtx_assert(m, MA_NOTOWNED); 521 522#ifdef notyet 523 /* 524 * If we're borrowing an interrupted thread's VM context, we 525 * must clean up before going to sleep. 526 */ 527 if (td->td_ithd != NULL) { 528 struct ithd *it = td->td_ithd; 529 530 if (it->it_interrupted) { 531 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 532 CTR2(KTR_LOCK, 533 "_mtx_lock_sleep: %p interrupted %p", 534 it, it->it_interrupted); 535 intr_thd_fixup(it); 536 } 537 } 538#endif 539 540 /* 541 * Put us on the list of threads blocked on this mutex. 542 */ 543 if (TAILQ_EMPTY(&m->mtx_blocked)) { 544 td1 = mtx_owner(m); 545 LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested); 546 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq); 547 } else { 548 TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) 549 if (td1->td_priority > td->td_priority) 550 break; 551 if (td1) 552 TAILQ_INSERT_BEFORE(td1, td, td_blkq); 553 else 554 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq); 555 } 556 557 /* 558 * Save who we're blocked on. 559 */ 560 td->td_blocked = m; 561 td->td_mtxname = m->mtx_object.lo_name; 562 td->td_proc->p_stat = SMTX; 563 propagate_priority(td); 564 565 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 566 CTR3(KTR_LOCK, 567 "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m, 568 m->mtx_object.lo_name); 569 570 td->td_proc->p_stats->p_ru.ru_nvcsw++; 571 mi_switch(); 572 573 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 574 CTR3(KTR_LOCK, 575 "_mtx_lock_sleep: p %p free from blocked on [%p] %s", 576 td, m, m->mtx_object.lo_name); 577 578 mtx_unlock_spin(&sched_lock); 579 } 580 581 return; 582} 583 584/* 585 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock. 586 * 587 * This is only called if we need to actually spin for the lock. Recursion 588 * is handled inline. 589 */ 590void 591_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line) 592{ 593 int i = 0; 594 595 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 596 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); 597 598 for (;;) { 599 if (_obtain_lock(m, curthread)) 600 break; 601 602 /* Give interrupts a chance while we spin. */ 603 critical_exit(); 604 while (m->mtx_lock != MTX_UNOWNED) { 605 if (i++ < 10000000) 606 continue; 607 if (i++ < 60000000) 608 DELAY(1); 609#ifdef DDB 610 else if (!db_active) 611#else 612 else 613#endif 614 panic("spin lock %s held by %p for > 5 seconds", 615 m->mtx_object.lo_name, (void *)m->mtx_lock); 616 } 617 critical_enter(); 618 } 619 620 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 621 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); 622 623 return; 624} 625 626/* 627 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. 628 * 629 * We are only called here if the lock is recursed or contested (i.e. we 630 * need to wake up a blocked thread). 631 */ 632void 633_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) 634{ 635 struct thread *td, *td1; 636 struct mtx *m1; 637 int pri; 638 639 td = curthread; 640 641 if (mtx_recursed(m)) { 642 if (--(m->mtx_recurse) == 0) 643 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); 644 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 645 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); 646 return; 647 } 648 649 mtx_lock_spin(&sched_lock); 650 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 651 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); 652 653 td1 = TAILQ_FIRST(&m->mtx_blocked); 654 MPASS(td->td_proc->p_magic == P_MAGIC); 655 MPASS(td1->td_proc->p_magic == P_MAGIC); 656 657 TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq); 658 659 if (TAILQ_EMPTY(&m->mtx_blocked)) { 660 LIST_REMOVE(m, mtx_contested); 661 _release_lock_quick(m); 662 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 663 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m); 664 } else 665 atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED); 666 667 pri = PRI_MAX; 668 LIST_FOREACH(m1, &td->td_contested, mtx_contested) { 669 int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority; 670 if (cp < pri) 671 pri = cp; 672 } 673 674 if (pri > td->td_base_pri) 675 pri = td->td_base_pri; 676 td->td_priority = pri; 677 678 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 679 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p", 680 m, td1); 681 682 td1->td_blocked = NULL; 683 td1->td_proc->p_stat = SRUN; 684 setrunqueue(td1); 685 686 if (td->td_critnest == 1 && td1->td_priority < pri) { 687#ifdef notyet 688 if (td->td_ithd != NULL) { 689 struct ithd *it = td->td_ithd; 690 691 if (it->it_interrupted) { 692 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 693 CTR2(KTR_LOCK, 694 "_mtx_unlock_sleep: %p interrupted %p", 695 it, it->it_interrupted); 696 intr_thd_fixup(it); 697 } 698 } 699#endif 700 setrunqueue(td); 701 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 702 CTR2(KTR_LOCK, 703 "_mtx_unlock_sleep: %p switching out lock=%p", m, 704 (void *)m->mtx_lock); 705 706 td->td_proc->p_stats->p_ru.ru_nivcsw++; 707 mi_switch(); 708 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 709 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p", 710 m, (void *)m->mtx_lock); 711 } 712 713 mtx_unlock_spin(&sched_lock); 714 715 return; 716} 717 718/* 719 * All the unlocking of MTX_SPIN locks is done inline. 720 * See the _rel_spin_lock() macro for the details. 721 */ 722 723/* 724 * The backing function for the INVARIANTS-enabled mtx_assert() 725 */ 726#ifdef INVARIANT_SUPPORT 727void 728_mtx_assert(struct mtx *m, int what, const char *file, int line) 729{ 730 731 if (panicstr != NULL) 732 return; 733 switch (what) { 734 case MA_OWNED: 735 case MA_OWNED | MA_RECURSED: 736 case MA_OWNED | MA_NOTRECURSED: 737 if (!mtx_owned(m)) 738 panic("mutex %s not owned at %s:%d", 739 m->mtx_object.lo_name, file, line); 740 if (mtx_recursed(m)) { 741 if ((what & MA_NOTRECURSED) != 0) 742 panic("mutex %s recursed at %s:%d", 743 m->mtx_object.lo_name, file, line); 744 } else if ((what & MA_RECURSED) != 0) { 745 panic("mutex %s unrecursed at %s:%d", 746 m->mtx_object.lo_name, file, line); 747 } 748 break; 749 case MA_NOTOWNED: 750 if (mtx_owned(m)) 751 panic("mutex %s owned at %s:%d", 752 m->mtx_object.lo_name, file, line); 753 break; 754 default: 755 panic("unknown mtx_assert at %s:%d", file, line); 756 } 757} 758#endif 759 760/* 761 * The MUTEX_DEBUG-enabled mtx_validate() 762 * 763 * Most of these checks have been moved off into the LO_INITIALIZED flag 764 * maintained by the witness code. 765 */ 766#ifdef MUTEX_DEBUG 767 768void mtx_validate(struct mtx *); 769 770void 771mtx_validate(struct mtx *m) 772{ 773 774/* 775 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly 776 * we can re-enable the kernacc() checks. 777 */ 778#ifndef __alpha__ 779 /* 780 * Can't call kernacc() from early init386(), especially when 781 * initializing Giant mutex, because some stuff in kernacc() 782 * requires Giant itself. 783 */ 784 if (!cold) 785 if (!kernacc((caddr_t)m, sizeof(m), 786 VM_PROT_READ | VM_PROT_WRITE)) 787 panic("Can't read and write to mutex %p", m); 788#endif 789} 790#endif 791 792/* 793 * General init routine used by the MTX_SYSINIT() macro. 794 */ 795void 796mtx_sysinit(void *arg) 797{ 798 struct mtx_args *margs = arg; 799 800 mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); 801} 802 803/* 804 * Mutex initialization routine; initialize lock `m' of type contained in 805 * `opts' with options contained in `opts' and name `name.' The optional 806 * lock type `type' is used as a general lock category name for use with 807 * witness. 808 */ 809void 810mtx_init(struct mtx *m, const char *name, const char *type, int opts) 811{ 812 struct lock_object *lock; 813 814 MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | 815 MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0); 816 817#ifdef MUTEX_DEBUG 818 /* Diagnostic and error correction */ 819 mtx_validate(m); 820#endif 821 822 lock = &m->mtx_object; 823 KASSERT((lock->lo_flags & LO_INITIALIZED) == 0, 824 ("mutex %s %p already initialized", name, m)); 825 bzero(m, sizeof(*m)); 826 if (opts & MTX_SPIN) 827 lock->lo_class = &lock_class_mtx_spin; 828 else 829 lock->lo_class = &lock_class_mtx_sleep; 830 lock->lo_name = name; 831 lock->lo_type = type != NULL ? type : name; 832 if (opts & MTX_QUIET) 833 lock->lo_flags = LO_QUIET; 834 if (opts & MTX_RECURSE) 835 lock->lo_flags |= LO_RECURSABLE; 836 if (opts & MTX_SLEEPABLE) 837 lock->lo_flags |= LO_SLEEPABLE; 838 if ((opts & MTX_NOWITNESS) == 0) 839 lock->lo_flags |= LO_WITNESS; 840 if (opts & MTX_DUPOK) 841 lock->lo_flags |= LO_DUPOK; 842 843 m->mtx_lock = MTX_UNOWNED; 844 TAILQ_INIT(&m->mtx_blocked); 845 846 LOCK_LOG_INIT(lock, opts); 847 848 WITNESS_INIT(lock); 849} 850 851/* 852 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be 853 * passed in as a flag here because if the corresponding mtx_init() was 854 * called with MTX_QUIET set, then it will already be set in the mutex's 855 * flags. 856 */ 857void 858mtx_destroy(struct mtx *m) 859{ 860 861 LOCK_LOG_DESTROY(&m->mtx_object, 0); 862 863 if (!mtx_owned(m)) 864 MPASS(mtx_unowned(m)); 865 else { 866 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); 867 868 /* Tell witness this isn't locked to make it happy. */ 869 WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__, 870 __LINE__); 871 } 872 873 WITNESS_DESTROY(&m->mtx_object); 874} 875 876/* 877 * Intialize the mutex code and system mutexes. This is called from the MD 878 * startup code prior to mi_startup(). The per-CPU data space needs to be 879 * setup before this is called. 880 */ 881void 882mutex_init(void) 883{ 884 885 /* Setup thread0 so that mutexes work. */ 886 LIST_INIT(&thread0.td_contested); 887 888 /* 889 * Initialize mutexes. 890 */ 891 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); 892 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); 893 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); 894 mtx_lock(&Giant); 895} 896 897/* 898 * Encapsulated Giant mutex routines. These routines provide encapsulation 899 * control for the Giant mutex, allowing sysctls to be used to turn on and 900 * off Giant around certain subsystems. The default value for the sysctls 901 * are set to what developers believe is stable and working in regards to 902 * the Giant pushdown. Developers should not turn off Giant via these 903 * sysctls unless they know what they are doing. 904 * 905 * Callers of mtx_lock_giant() are expected to pass the return value to an 906 * accompanying mtx_unlock_giant() later on. If multiple subsystems are 907 * effected by a Giant wrap, all related sysctl variables must be zero for 908 * the subsystem call to operate without Giant (as determined by the caller). 909 */ 910 911SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation"); 912 913static int kern_giant_all = 0; 914SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, ""); 915 916int kern_giant_proc = 1; /* Giant around PROC locks */ 917int kern_giant_file = 1; /* Giant around struct file & filedesc */ 918int kern_giant_ucred = 1; /* Giant around ucred */ 919SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, ""); 920SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, ""); 921SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, ""); 922 923int 924mtx_lock_giant(int sysctlvar) 925{ 926 if (sysctlvar || kern_giant_all) { 927 mtx_lock(&Giant); 928 return(1); 929 } 930 return(0); 931} 932 933void 934mtx_unlock_giant(int s) 935{ 936 if (s) 937 mtx_unlock(&Giant); 938} 939 940