kern_mutex.c revision 315378
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/kern_mutex.c 315378 2017-03-16 06:45:36Z mjg $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
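/*
 * Illustrative expansion (a sketch, assuming the usual offsetof()-based
 * definition of __containerof() from sys/cdefs.h):
 *
 *	mtxlock2mtx(c) ~
 *	    (struct mtx *)((char *)(c) - offsetof(struct mtx, mtx_lock))
 *
 * The _mtx_*() KPI below takes the address of the lock word (the
 * "cookie") rather than the mutex itself, so the interface does not
 * depend on the layout of struct mtx; this macro recovers the
 * enclosing mutex from that cookie.
 */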
/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_mostly mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_mostly mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif
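/*
 * A note on how the lock classes above get consumed (a sketch; the real
 * consumers live in kern/kern_synch.c and kern/subr_sleepqueue.c):
 * generic sleep code releases and reacquires an interlock through the
 * class methods rather than hard-coding the lock type, roughly:
 *
 *	class = LOCK_CLASS(lock);
 *	how = class->lc_unlock(lock);
 *	... sleep ...
 *	class->lc_lock(lock, how);
 *
 * This is why lock_spin()/unlock_spin() simply panic: sleeping is not
 * legal while holding a spin mutex, with msleep_spin(9) as the one
 * special-cased exception.
 */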
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_sleep(c, opts, file, line);
	TD_LOCKS_DEC(curthread);
}
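/*
 * Illustrative use of the sleep-mutex KPI above (a minimal sketch; the
 * lock and counter names are hypothetical, not part of this file):
 */
#if 0
static struct mtx example_mtx;
static int example_count;

static void
example_setup(void)
{

	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
}

static void
example_increment(void)
{

	mtx_lock(&example_mtx);		/* may block via _mtx_lock_sleep() */
	example_count++;
	mtx_unlock(&example_mtx);
}
#endif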
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
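/*
 * Illustrative mtx_trylock() pattern (a sketch; names are hypothetical).
 * A zero return means the lock was not acquired and the caller must not
 * touch the protected state:
 */
#if 0
static void
example_poll(struct mtx *mp, int *statep)
{

	if (mtx_trylock(mp)) {
		(*statep)++;
		mtx_unlock(mp);
	}
	/* else: skip this round rather than block */
}
#endif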
/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}
#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			v = MTX_READ_VALUE(m);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
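/*
 * A note on the blocking path above (descriptive; the mechanism lives in
 * kern/subr_turnstile.c): turnstile_wait() queues the thread on the
 * turnstile and propagates its priority to the current lock owner, so a
 * high-priority waiter cannot be starved indefinitely by a low-priority
 * owner (priority inheritance).
 */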
static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
    int opts, const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#ifdef KDTRACE_HOOKS
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
	if (spin_time != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
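/*
 * Illustrative handoff using the helpers above (a sketch; the real
 * consumers are the schedulers in kern/sched_*.c).  While a thread
 * migrates between run queues, its td_lock temporarily points at
 * blocked_lock, which is never unlocked, so concurrent thread_lock()
 * callers keep spinning in thread_lock_flags_() until the new lock is
 * published:
 *
 *	old = thread_lock_block(td);	(td_lock now &blocked_lock)
 *	... move td to its new run queue ...
 *	thread_lock_unblock(td, new);	(store-release publishes new lock)
 */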
/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (!mtx_recursed(m)) {
		LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
		if (_mtx_release_lock(m, (uintptr_t)curthread))
			return;
	} else {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
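/*
 * Illustrative use of mtx_sysinit() via the MTX_SYSINIT() macro from
 * sys/mutex.h (the lock and description here are hypothetical):
 */
#if 0
static struct mtx example_global_mtx;
MTX_SYSINIT(example_global, &example_global_mtx, "example global lock",
    MTX_DEF);
#endif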
/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
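/*
 * Illustrative teardown (a sketch; names hypothetical).  Note that
 * _mtx_destroy() above copes with both an unowned mutex and one still
 * held by the caller, so a final mtx_unlock() is not strictly required:
 */
#if 0
static void
example_teardown(struct mtx *mp)
{

	mtx_lock(mp);
	/* ... tear down the state protected by mp ... */
	mtx_destroy(mp);	/* legal while owned; see above */
}
#endif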
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
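/*
 * Roughly what the DDB "show lock" command produces for a mutex
 * (illustrative session; the address and exact output depend on the
 * kernel).  The command prints the class and name itself, then
 * dispatches to the lc_ddb_show method, i.e. db_show_mtx() above:
 *
 *	db> show lock 0xffffffff80e19f80
 *	 class: sleep mutex
 *	 name: Giant
 *	 flags: {DEF, RECURSE}
 *	 state: {UNOWNED}
 */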