kern_mutex.c revision 278650
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_mutex.c 278650 2015-02-13 00:29:57Z sbruno $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
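
/*
 * Illustrative sketch (not part of the original file): the public
 * mtx_lock()-style wrapper macros pass the address of the lock word as an
 * opaque cookie, and mtxlock2mtx() recovers the enclosing mutex from it.
 * Roughly:
 *
 *	struct mtx m;
 *	volatile uintptr_t *c = &m.mtx_lock;	(what the wrappers pass)
 *	struct mtx *mp = mtxlock2mtx(c);	(mp == &m again)
 *
 * Keeping struct mtx out of the function signatures this way helps keep
 * the KPI stable even if the structure grows other members.
 */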
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif
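
/*
 * Illustrative sketch (not part of the original file; sc and sc_ready are
 * hypothetical driver names): the lc_lock/lc_unlock methods above are what
 * let the generic sleep code drop and reacquire a sleep mutex around a
 * sleep, e.g.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (!sc->sc_ready)
 *		mtx_sleep(&sc->sc_ready, &sc->sc_mtx, 0, "scrdy", 0);
 *	mtx_unlock(&sc->sc_mtx);
 *
 * Spin mutexes deliberately panic in lock_spin()/unlock_spin() because a
 * thread holding one may not sleep; msleep_spin(9) is the only sleep
 * primitive usable with a spin mutex.
 */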
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	curthread->td_locks++;
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
	curthread->td_locks--;
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
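
/*
 * Illustrative sketch (not part of the original file; intr_mtx is a
 * hypothetical name): a typical consumer of the spin-mutex interface whose
 * function versions appear above.  Acquiring a spin mutex disables
 * interrupts on the local CPU via spinlock_enter(), so the critical
 * section must stay short and must never sleep:
 *
 *	static struct mtx intr_mtx;
 *	mtx_init(&intr_mtx, "intr state", NULL, MTX_SPIN);
 *
 *	mtx_lock_spin(&intr_mtx);
 *	(touch state shared with interrupt context)
 *	mtx_unlock_spin(&intr_mtx);
 */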
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
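
/*
 * Illustrative sketch (not part of the original file; foo_mtx is a
 * hypothetical sleep mutex): the mtx_trylock() wrappers around
 * _mtx_trylock_flags_() above return nonzero on success and never block,
 * which is useful when locks must be taken outside their normal order:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		(got it; do the work)
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		(back off, drop the other locks, retry in order)
 *	}
 */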
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "spinning", "lockname:\"%s\"",
				    m->lock_object.lo_name);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname((struct thread *)tid),
				    "running");
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}
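
/*
 * Illustrative console output when a spin lock is held too long and
 * _mtx_lock_spin_failed() above panics (pointer values, lock name and tid
 * are made up):
 *
 *	spin lock 0xffffffff80e02a80 (sched lock 0) held by 0xfffff80003a9c000 (tid 100042) too long
 *	panic: spin lock held too long
 */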
#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */
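
/*
 * Illustrative sketch (not part of the original file): thread_lock() is
 * the usual consumer of thread_lock_flags_() below.  Because a thread's
 * lock pointer (td_lock) can change while we spin, callers never name the
 * underlying mutex directly:
 *
 *	thread_lock(td);
 *	(inspect or modify td's scheduling state)
 *	thread_unlock(td);
 */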
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}
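
/*
 * Illustrative sketch (not part of the original file; simplified from the
 * inline release path in sys/mutex.h): the fast path only falls into
 * __mtx_unlock_sleep() above when a single atomic release fails, i.e. when
 * waiters have set MTX_CONTESTED or the lock is recursed.  Roughly:
 *
 *	if (!_mtx_release_lock(m, (uintptr_t)curthread))
 *		__mtx_unlock_sleep(&m->mtx_lock, opts, file, line);
 *
 * where _mtx_release_lock() is a single compare-and-set of mtx_lock from
 * the owning thread pointer back to MTX_UNOWNED.
 */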
/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping)
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}

/*
 * Mutex initialization routine; initialize lock `m' of type contained in
 * `opts' with options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
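
/*
 * Illustrative sketch (not part of the original file; foo_mtx and the
 * strings are hypothetical): the two common ways a subsystem creates a
 * mutex, either via the MTX_SYSINIT() macro that arranges for
 * mtx_sysinit() above to run at boot, or by hand in an attach routine:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 *
 *	(or)
 *
 *	mtx_init(&foo_mtx, "foo lock", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&foo_mtx);
 */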
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
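
/*
 * Illustrative DDB output from db_show_mtx() above for an owned,
 * recursable sleep mutex (pointer, tid and pid values are made up):
 *
 *	 flags: {DEF, RECURSE}
 *	 state: {OWNED}
 *	 owner: 0xfffff80003a9c000 (tid 100001, pid 1, "init")
 */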