A side-by-side diff of sys/kern/kern_mutex.c between revisions 118272 (left) and 122514 (right), in which the hand-rolled blocked-thread queue and priority propagation are replaced by the turnstile interface.

kern_mutex.c (118272) | kern_mutex.c (122514) |
---|---|
1/*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. --- 20 unchanged lines hidden (view full) --- 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 */ 31 32/* 33 * Machine independent bits of mutex implementation. 34 */ 35 36#include <sys/cdefs.h> | 1/*- 2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. --- 20 unchanged lines hidden (view full) --- 29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 30 */ 31 32/* 33 * Machine independent bits of mutex implementation. 34 */ 35 36#include <sys/cdefs.h> |
37__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 118272 2003-07-31 18:52:18Z jhb $"); | 37__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 122514 2003-11-11 22:07:29Z jhb $"); |
38 39#include "opt_adaptive_mutexes.h" 40#include "opt_ddb.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bus.h> 45#include <sys/kernel.h> 46#include <sys/ktr.h> 47#include <sys/lock.h> 48#include <sys/malloc.h> 49#include <sys/mutex.h> 50#include <sys/proc.h> 51#include <sys/resourcevar.h> 52#include <sys/sched.h> 53#include <sys/sbuf.h> 54#include <sys/sysctl.h> | 38 39#include "opt_adaptive_mutexes.h" 40#include "opt_ddb.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bus.h> 45#include <sys/kernel.h> 46#include <sys/ktr.h> 47#include <sys/lock.h> 48#include <sys/malloc.h> 49#include <sys/mutex.h> 50#include <sys/proc.h> 51#include <sys/resourcevar.h> 52#include <sys/sched.h> 53#include <sys/sbuf.h> 54#include <sys/sysctl.h> |
 | 55#include <sys/turnstile.h> |
55#include <sys/vmmeter.h> 56 57#include <machine/atomic.h> 58#include <machine/bus.h> 59#include <machine/clock.h> 60#include <machine/cpu.h> 61 62#include <ddb/ddb.h> --- 22 unchanged lines hidden (view full) --- 85}; 86 87/* 88 * System-wide mutexes 89 */ 90struct mtx sched_lock; 91struct mtx Giant; 92 | 56#include <sys/vmmeter.h> 57 58#include <machine/atomic.h> 59#include <machine/bus.h> 60#include <machine/clock.h> 61#include <machine/cpu.h> 62 63#include <ddb/ddb.h> --- 22 unchanged lines hidden (view full) --- 86}; 87 88/* 89 * System-wide mutexes 90 */ 91struct mtx sched_lock; 92struct mtx Giant; 93 |
93/* 94 * Prototypes for non-exported routines. 95 */ 96static void propagate_priority(struct thread *); 97 98static void 99propagate_priority(struct thread *td) 100{ 101 int pri = td->td_priority; 102 struct mtx *m = td->td_blocked; 103 104 mtx_assert(&sched_lock, MA_OWNED); 105 for (;;) { 106 struct thread *td1; 107 108 td = mtx_owner(m); 109 110 if (td == NULL) { 111 /* 112 * This really isn't quite right. Really 113 * ought to bump priority of thread that 114 * next acquires the mutex. 115 */ 116 MPASS(m->mtx_lock == MTX_CONTESTED); 117 return; 118 } 119 120 MPASS(td->td_proc != NULL); 121 MPASS(td->td_proc->p_magic == P_MAGIC); 122 KASSERT(!TD_IS_SLEEPING(td), ( 123 "sleeping thread (pid %d) owns a mutex", 124 td->td_proc->p_pid)); 125 if (td->td_priority <= pri) /* lower is higher priority */ 126 return; 127 128 129 /* 130 * If lock holder is actually running, just bump priority. 131 */ 132 if (TD_IS_RUNNING(td)) { 133 td->td_priority = pri; 134 return; 135 } 136 137#ifndef SMP 138 /* 139 * For UP, we check to see if td is curthread (this shouldn't 140 * ever happen however as it would mean we are in a deadlock.) 141 */ 142 KASSERT(td != curthread, ("Deadlock detected")); 143#endif 144 145 /* 146 * If on run queue move to new run queue, and quit. 147 * XXXKSE this gets a lot more complicated under threads 148 * but try anyhow. 149 */ 150 if (TD_ON_RUNQ(td)) { 151 MPASS(td->td_blocked == NULL); 152 sched_prio(td, pri); 153 return; 154 } 155 /* 156 * Adjust for any other cases. 157 */ 158 td->td_priority = pri; 159 160 /* 161 * If we aren't blocked on a mutex, we should be. 162 */ 163 KASSERT(TD_ON_LOCK(td), ( 164 "process %d(%s):%d holds %s but isn't blocked on a mutex\n", 165 td->td_proc->p_pid, td->td_proc->p_comm, td->td_state, 166 m->mtx_object.lo_name)); 167 168 /* 169 * Pick up the mutex that td is blocked on. 170 */ 171 m = td->td_blocked; 172 MPASS(m != NULL); 173 174 /* 175 * Check if the thread needs to be moved up on 176 * the blocked chain 177 */ 178 if (td == TAILQ_FIRST(&m->mtx_blocked)) { 179 continue; 180 } 181 182 td1 = TAILQ_PREV(td, threadqueue, td_lockq); 183 if (td1->td_priority <= pri) { 184 continue; 185 } 186 187 /* 188 * Remove thread from blocked chain and determine where 189 * it should be moved up to. Since we know that td1 has 190 * a lower priority than td, we know that at least one 191 * thread in the chain has a lower priority and that 192 * td1 will thus not be NULL after the loop. 193 */ 194 TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq); 195 TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) { 196 MPASS(td1->td_proc->p_magic == P_MAGIC); 197 if (td1->td_priority > pri) 198 break; 199 } 200 201 MPASS(td1 != NULL); 202 TAILQ_INSERT_BEFORE(td1, td, td_lockq); 203 CTR4(KTR_LOCK, 204 "propagate_priority: p %p moved before %p on [%p] %s", 205 td, td1, m, m->mtx_object.lo_name); 206 } 207} 208 | |
209#ifdef MUTEX_PROFILING 210SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging"); 211SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling"); 212static int mutex_prof_enable = 0; 213SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW, 214 &mutex_prof_enable, 0, "Enable tracing of mutex holdtime"); 215 216struct mutex_prof { --- 262 unchanged lines hidden (view full) --- 479 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. 480 * 481 * We call this if the lock is either contested (i.e. we need to go to 482 * sleep waiting for it), or if we need to recurse on it. 483 */ 484void 485_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) 486{ | 94#ifdef MUTEX_PROFILING 95SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging"); 96SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling"); 97static int mutex_prof_enable = 0; 98SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW, 99 &mutex_prof_enable, 0, "Enable tracing of mutex holdtime"); 100 101struct mutex_prof { --- 262 unchanged lines hidden (view full) --- 364 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. 365 * 366 * We call this if the lock is either contested (i.e. we need to go to 367 * sleep waiting for it), or if we need to recurse on it. 368 */ 369void 370_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) 371{ |
 | 372 struct turnstile *ts; |
487 struct thread *td = curthread; | 373 struct thread *td = curthread; |
488 struct thread *td1; | |
489#if defined(SMP) && defined(ADAPTIVE_MUTEXES) 490 struct thread *owner; 491#endif 492 uintptr_t v; 493#ifdef KTR 494 int cont_logged = 0; 495#endif 496 --- 7 unchanged lines hidden (view full) --- 504 505 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 506 CTR4(KTR_LOCK, 507 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", 508 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line); 509 510 while (!_obtain_lock(m, td)) { 511 | 374#if defined(SMP) && defined(ADAPTIVE_MUTEXES) 375 struct thread *owner; 376#endif 377 uintptr_t v; 378#ifdef KTR 379 int cont_logged = 0; 380#endif 381 --- 7 unchanged lines hidden (view full) --- 389 390 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 391 CTR4(KTR_LOCK, 392 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", 393 m->mtx_object.lo_name, (void *)m->mtx_lock, file, line); 394 395 while (!_obtain_lock(m, td)) { 396 |
512 mtx_lock_spin(&sched_lock); | 397 ts = turnstile_lookup(&m->mtx_object); |
513 v = m->mtx_lock; 514 515 /* 516 * Check if the lock has been released while spinning for | 398 v = m->mtx_lock; 399 400 /* 401 * Check if the lock has been released while spinning for |
517 * the sched_lock. | 402 * the turnstile chain lock. |
518 */ 519 if (v == MTX_UNOWNED) { | 403 */ 404 if (v == MTX_UNOWNED) { |
520 mtx_unlock_spin(&sched_lock); | 405 turnstile_release(&m->mtx_object); |
521#ifdef __i386__ 522 ia32_pause(); 523#endif 524 continue; 525 } 526 527 /* 528 * The mutex was marked contested on release. This means that 529 * there are other threads blocked on it. Grab ownership of 530 * it and propagate its priority to the current thread if 531 * necessary. 532 */ 533 if (v == MTX_CONTESTED) { | 406#ifdef __i386__ 407 ia32_pause(); 408#endif 409 continue; 410 } 411 412 /* 413 * The mutex was marked contested on release. This means that 414 * there are other threads blocked on it. Grab ownership of 415 * it and propagate its priority to the current thread if 416 * necessary. 417 */ 418 if (v == MTX_CONTESTED) { |
534 td1 = TAILQ_FIRST(&m->mtx_blocked); 535 MPASS(td1 != NULL); | 419 MPASS(ts != NULL); |
536 m->mtx_lock = (uintptr_t)td | MTX_CONTESTED; | 420 m->mtx_lock = (uintptr_t)td | MTX_CONTESTED; |
537 LIST_INSERT_HEAD(&td->td_contested, m, mtx_contested); 538 539 if (td1->td_priority < td->td_priority) 540 td->td_priority = td1->td_priority; 541 mtx_unlock_spin(&sched_lock); | 421 turnstile_claim(ts); |
542 return; 543 } 544 545 /* 546 * If the mutex isn't already contested and a failure occurs 547 * setting the contested bit, the mutex was either released 548 * or the state of the MTX_RECURSED bit changed. 549 */ 550 if ((v & MTX_CONTESTED) == 0 && 551 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, 552 (void *)(v | MTX_CONTESTED))) { | 422 return; 423 } 424 425 /* 426 * If the mutex isn't already contested and a failure occurs 427 * setting the contested bit, the mutex was either released 428 * or the state of the MTX_RECURSED bit changed. 429 */ 430 if ((v & MTX_CONTESTED) == 0 && 431 !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, 432 (void *)(v | MTX_CONTESTED))) { |
553 mtx_unlock_spin(&sched_lock); | 433 turnstile_release(&m->mtx_object); |
554#ifdef __i386__ 555 ia32_pause(); 556#endif 557 continue; 558 } 559 560#if defined(SMP) && defined(ADAPTIVE_MUTEXES) 561 /* 562 * If the current owner of the lock is executing on another 563 * CPU, spin instead of blocking. 564 */ 565 owner = (struct thread *)(v & MTX_FLAGMASK); 566 if (m != &Giant && TD_IS_RUNNING(owner)) { | 434#ifdef __i386__ 435 ia32_pause(); 436#endif 437 continue; 438 } 439 440#if defined(SMP) && defined(ADAPTIVE_MUTEXES) 441 /* 442 * If the current owner of the lock is executing on another 443 * CPU, spin instead of blocking. 444 */ 445 owner = (struct thread *)(v & MTX_FLAGMASK); 446 if (m != &Giant && TD_IS_RUNNING(owner)) { |
567 mtx_unlock_spin(&sched_lock); | 447 turnstile_release(&m->mtx_object); |
568 while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) { 569#ifdef __i386__ 570 ia32_pause(); 571#endif 572 } 573 continue; 574 } 575#endif /* SMP && ADAPTIVE_MUTEXES */ 576 577 /* 578 * We definitely must sleep for this lock. 579 */ 580 mtx_assert(m, MA_NOTOWNED); 581 | 448 while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) { 449#ifdef __i386__ 450 ia32_pause(); 451#endif 452 } 453 continue; 454 } 455#endif /* SMP && ADAPTIVE_MUTEXES */ 456 457 /* 458 * We definitely must sleep for this lock. 459 */ 460 mtx_assert(m, MA_NOTOWNED); 461 |
582#ifdef notyet 583 /* 584 * If we're borrowing an interrupted thread's VM context, we 585 * must clean up before going to sleep. 586 */ 587 if (td->td_ithd != NULL) { 588 struct ithd *it = td->td_ithd; 589 590 if (it->it_interrupted) { 591 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 592 CTR2(KTR_LOCK, 593 "_mtx_lock_sleep: %p interrupted %p", 594 it, it->it_interrupted); 595 intr_thd_fixup(it); 596 } 597 } 598#endif 599 600 /* 601 * Put us on the list of threads blocked on this mutex 602 * and add this mutex to the owning thread's list of 603 * contested mutexes if needed. 604 */ 605 if (TAILQ_EMPTY(&m->mtx_blocked)) { 606 td1 = mtx_owner(m); 607 LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested); 608 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq); 609 } else { 610 TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) 611 if (td1->td_priority > td->td_priority) 612 break; 613 if (td1) 614 TAILQ_INSERT_BEFORE(td1, td, td_lockq); 615 else 616 TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq); 617 } | |
618#ifdef KTR 619 if (!cont_logged) { 620 CTR6(KTR_CONTENTION, 621 "contention: %p at %s:%d wants %s, taken by %s:%d", 622 td, file, line, m->mtx_object.lo_name, 623 WITNESS_FILE(&m->mtx_object), 624 WITNESS_LINE(&m->mtx_object)); 625 cont_logged = 1; 626 } 627#endif 628 629 /* | 462#ifdef KTR 463 if (!cont_logged) { 464 CTR6(KTR_CONTENTION, 465 "contention: %p at %s:%d wants %s, taken by %s:%d", 466 td, file, line, m->mtx_object.lo_name, 467 WITNESS_FILE(&m->mtx_object), 468 WITNESS_LINE(&m->mtx_object)); 469 cont_logged = 1; 470 } 471#endif 472 473 /* |
630 * Save who we're blocked on. | 474 * Block on the turnstile. |
631 */ | 475 */ |
632 td->td_blocked = m; 633 td->td_lockname = m->mtx_object.lo_name; 634 TD_SET_LOCK(td); 635 propagate_priority(td); 636 637 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 638 CTR3(KTR_LOCK, 639 "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m, 640 m->mtx_object.lo_name); 641 642 td->td_proc->p_stats->p_ru.ru_nvcsw++; 643 mi_switch(); 644 645 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 646 CTR3(KTR_LOCK, 647 "_mtx_lock_sleep: p %p free from blocked on [%p] %s", 648 td, m, m->mtx_object.lo_name); 649 650 mtx_unlock_spin(&sched_lock); | 476 turnstile_wait(ts, &m->mtx_object, mtx_owner(m)); |
651 } 652 653#ifdef KTR 654 if (cont_logged) { 655 CTR4(KTR_CONTENTION, 656 "contention end: %s acquired by %p at %s:%d", 657 m->mtx_object.lo_name, td, file, line); 658 } --- 60 unchanged lines hidden (view full) --- 719 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. 720 * 721 * We are only called here if the lock is recursed or contested (i.e. we 722 * need to wake up a blocked thread). 723 */ 724void 725_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) 726{ | 477 } 478 479#ifdef KTR 480 if (cont_logged) { 481 CTR4(KTR_CONTENTION, 482 "contention end: %s acquired by %p at %s:%d", 483 m->mtx_object.lo_name, td, file, line); 484 } --- 60 unchanged lines hidden (view full) --- 545 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. 546 * 547 * We are only called here if the lock is recursed or contested (i.e. we 548 * need to wake up a blocked thread). 549 */ 550void 551_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) 552{ |
 | 553 struct turnstile *ts; |
727 struct thread *td, *td1; | 554 struct thread *td, *td1; |
728 struct mtx *m1; 729 int pri; | |
730 | 555 |
731 td = curthread; 732 | |
733 if (mtx_recursed(m)) { 734 if (--(m->mtx_recurse) == 0) 735 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); 736 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 737 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); 738 return; 739 } 740 | 556 if (mtx_recursed(m)) { 557 if (--(m->mtx_recurse) == 0) 558 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); 559 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 560 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); 561 return; 562 } 563 |
741 mtx_lock_spin(&sched_lock); | 564 ts = turnstile_lookup(&m->mtx_object); |
742 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 743 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); 744 | 565 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 566 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); 567 |
745 td1 = TAILQ_FIRST(&m->mtx_blocked); | |
746#if defined(SMP) && defined(ADAPTIVE_MUTEXES) | 568#if defined(SMP) && defined(ADAPTIVE_MUTEXES) |
747 if (td1 == NULL) { | 569 if (ts == NULL) { |
748 _release_lock_quick(m); 749 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 750 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m); | 570 _release_lock_quick(m); 571 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 572 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m); |
751 mtx_unlock_spin(&sched_lock); | 573 turnstile_release(&m->mtx_object); |
752 return; 753 } | 574 return; 575 } |
 | 576#else 577 MPASS(ts != NULL); |
754#endif | 578#endif |
755 MPASS(td->td_proc->p_magic == P_MAGIC); 756 MPASS(td1->td_proc->p_magic == P_MAGIC); 757 758 TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq); 759 760 LIST_REMOVE(m, mtx_contested); 761 if (TAILQ_EMPTY(&m->mtx_blocked)) { | 579 /* XXX */ 580 td1 = turnstile_head(ts); 581 if (turnstile_signal(ts)) { |
762 _release_lock_quick(m); 763 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 764 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m); | 582 _release_lock_quick(m); 583 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 584 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m); |
765 } else | 585 } else { |
766 m->mtx_lock = MTX_CONTESTED; | 586 m->mtx_lock = MTX_CONTESTED; |
767 768 pri = PRI_MAX; 769 LIST_FOREACH(m1, &td->td_contested, mtx_contested) { 770 int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority; 771 if (cp < pri) 772 pri = cp; | 587 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 588 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested", 589 m); |
773 } | 590 } |
 | 591 turnstile_unpend(ts); |
774 | 592 |
775 if (pri > td->td_base_pri) 776 pri = td->td_base_pri; 777 td->td_priority = pri; 778 779 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 780 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p", 781 m, td1); 782 783 td1->td_blocked = NULL; 784 TD_CLR_LOCK(td1); 785 if (!TD_CAN_RUN(td1)) { 786 mtx_unlock_spin(&sched_lock); | 593 /* 594 * XXX: This is just a hack until preemption is done. However, 595 * once preemption is done we need to either wrap the 596 * turnstile_signal() and release of the actual lock in an 597 * extra critical section or change the preemption code to 598 * always just set a flag and never do instant-preempts. 599 */ 600 td = curthread; 601 if (td->td_critnest > 0 || td1->td_priority >= td->td_priority) |
787 return; | 602 return; |
788 } 789 setrunqueue(td1); 790 791 if (td->td_critnest == 1 && td1->td_priority < pri) { | 603 mtx_lock_spin(&sched_lock); 604 if (!TD_IS_RUNNING(td1)) { |
792#ifdef notyet 793 if (td->td_ithd != NULL) { 794 struct ithd *it = td->td_ithd; 795 796 if (it->it_interrupted) { 797 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 798 CTR2(KTR_LOCK, 799 "_mtx_unlock_sleep: %p interrupted %p", --- 8 unchanged lines hidden (view full) --- 808 (void *)m->mtx_lock); 809 810 td->td_proc->p_stats->p_ru.ru_nivcsw++; 811 mi_switch(); 812 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 813 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p", 814 m, (void *)m->mtx_lock); 815 } | 605#ifdef notyet 606 if (td->td_ithd != NULL) { 607 struct ithd *it = td->td_ithd; 608 609 if (it->it_interrupted) { 610 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 611 CTR2(KTR_LOCK, 612 "_mtx_unlock_sleep: %p interrupted %p", --- 8 unchanged lines hidden (view full) --- 621 (void *)m->mtx_lock); 622 623 td->td_proc->p_stats->p_ru.ru_nivcsw++; 624 mi_switch(); 625 if (LOCK_LOG_TEST(&m->mtx_object, opts)) 626 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p", 627 m, (void *)m->mtx_lock); 628 } |
816 | |
817 mtx_unlock_spin(&sched_lock); 818 819 return; 820} 821 822/* 823 * All the unlocking of MTX_SPIN locks is done inline. 824 * See the _rel_spin_lock() macro for the details. --- 118 unchanged lines hidden (view full) --- 943 if (opts & MTX_RECURSE) 944 lock->lo_flags |= LO_RECURSABLE; 945 if ((opts & MTX_NOWITNESS) == 0) 946 lock->lo_flags |= LO_WITNESS; 947 if (opts & MTX_DUPOK) 948 lock->lo_flags |= LO_DUPOK; 949 950 m->mtx_lock = MTX_UNOWNED; | 629 mtx_unlock_spin(&sched_lock); 630 631 return; 632} 633 634/* 635 * All the unlocking of MTX_SPIN locks is done inline. 636 * See the _rel_spin_lock() macro for the details. --- 118 unchanged lines hidden (view full) --- 755 if (opts & MTX_RECURSE) 756 lock->lo_flags |= LO_RECURSABLE; 757 if ((opts & MTX_NOWITNESS) == 0) 758 lock->lo_flags |= LO_WITNESS; 759 if (opts & MTX_DUPOK) 760 lock->lo_flags |= LO_DUPOK; 761 762 m->mtx_lock = MTX_UNOWNED; |
951 TAILQ_INIT(&m->mtx_blocked); | |
952 953 LOCK_LOG_INIT(lock, opts); 954 955 WITNESS_INIT(lock); 956} 957 958/* 959 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be --- 27 unchanged lines hidden (view full) --- 987 */ 988void 989mutex_init(void) 990{ 991 992 /* Setup thread0 so that mutexes work. */ 993 LIST_INIT(&thread0.td_contested); 994 | 763 764 LOCK_LOG_INIT(lock, opts); 765 766 WITNESS_INIT(lock); 767} 768 769/* 770 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be --- 27 unchanged lines hidden (view full) --- 798 */ 799void 800mutex_init(void) 801{ 802 803 /* Setup thread0 so that mutexes work. */ 804 LIST_INIT(&thread0.td_contested); 805 |
 | 806 /* Setup turnstiles so that sleep mutexes work. */ 807 init_turnstiles(); 808 |
995 /* 996 * Initialize mutexes. 997 */ 998 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); 999 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); 1000 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); 1001 mtx_lock(&Giant); 1002} | 809 /* 810 * Initialize mutexes. 811 */ 812 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); 813 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); 814 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); 815 mtx_lock(&Giant); 816} |
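Taken together, revision 122514 rips out the hand-rolled contention machinery and delegates it to the turnstile code: the `propagate_priority()` walk (old lines 93-208) and the per-mutex `mtx_blocked` queue are gone, `<sys/turnstile.h>` is included, and `mutex_init()` calls `init_turnstiles()` so sleep mutexes work. In `_mtx_lock_sleep()`, `turnstile_lookup()`/`turnstile_release()` take over the role `mtx_lock_spin(&sched_lock)`/`mtx_unlock_spin(&sched_lock)` played around the contested-bit checks, `turnstile_claim()` handles the MTX_CONTESTED hand-off, and a single `turnstile_wait()` subsumes the priority-ordered queue insertion, `propagate_priority()`, and `mi_switch()`. In `_mtx_unlock_sleep()`, `turnstile_head()`/`turnstile_signal()`/`turnstile_unpend()` replace the explicit dequeue, the owner's priority recomputation over `td_contested`, and `setrunqueue()`; the surviving `sched_lock`/`td_critnest` check is flagged `XXX` in the new code as a stopgap until preemption is done.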
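What the conversion does not change is the lock-word protocol itself: `mtx_lock` is `MTX_UNOWNED` when free, holds the owning thread pointer (with `MTX_RECURSED`/`MTX_CONTESTED` in the low flag bits, masked off by `MTX_FLAGMASK`) when held, and reads exactly `MTX_CONTESTED` when it was released with waiters still queued. The sketch below models that word protocol in standalone C11; everything named `model_*` is invented for illustration, and `sched_yield()` stands in for the real blocking, which goes through the turnstile in 122514 and through `mtx_blocked` plus `sched_lock` in 118272.

```c
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Lock-word encoding, mirroring the kernel's mtx_lock field. */
#define MTX_UNOWNED	((uintptr_t)0)	/* free, no waiters recorded */
#define MTX_CONTESTED	((uintptr_t)1)	/* waiters are (or were) queued */
#define MTX_FLAGMASK	(~(uintptr_t)3)	/* owner id lives above the flags */

struct model_mtx {			/* hypothetical stand-in for struct mtx */
	_Atomic uintptr_t mtx_lock;
};

static void
model_lock(struct model_mtx *m, uintptr_t self)
{
	uintptr_t v;

	for (;;) {
		/* Fast path: CAS UNOWNED -> self, as _obtain_lock() does. */
		v = MTX_UNOWNED;
		if (atomic_compare_exchange_strong(&m->mtx_lock, &v, self))
			return;
		/*
		 * Released-but-contested hand-off: take ownership but keep
		 * the flag, like the v == MTX_CONTESTED branch in
		 * _mtx_lock_sleep() (where 122514 also turnstile_claim()s).
		 */
		if (v == MTX_CONTESTED &&
		    atomic_compare_exchange_strong(&m->mtx_lock, &v,
		    self | MTX_CONTESTED))
			return;
		/* Otherwise make sure the owner will see MTX_CONTESTED... */
		if ((v & MTX_CONTESTED) == 0)
			atomic_compare_exchange_strong(&m->mtx_lock, &v,
			    v | MTX_CONTESTED);
		/* ...and wait; the kernel blocks in turnstile_wait() here. */
		sched_yield();
	}
}

static void
model_unlock(struct model_mtx *m, uintptr_t self)
{
	uintptr_t v = self;

	/* Uncontested release: CAS self -> UNOWNED (_release_lock_quick). */
	if (atomic_compare_exchange_strong(&m->mtx_lock, &v, MTX_UNOWNED))
		return;
	/*
	 * Contested release.  The kernel wakes the head waiter with
	 * turnstile_signal(); the model just leaves the MTX_CONTESTED
	 * marker so a waiter claims the lock through the branch above.
	 */
	atomic_store(&m->mtx_lock, MTX_CONTESTED);
}

static struct model_mtx m;
static int counter;

static void *
worker(void *arg)
{
	uintptr_t self = (uintptr_t)arg;
	int i;

	for (i = 0; i < 100000; i++) {
		model_lock(&m, self);
		counter++;		/* protected by the model mutex */
		model_unlock(&m, self);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;

	/* "Thread ids" with the two flag bits clear, like thread pointers. */
	pthread_create(&t1, NULL, worker, (void *)(uintptr_t)(1 << 2));
	pthread_create(&t2, NULL, worker, (void *)(uintptr_t)(2 << 2));
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("counter = %d (expect 200000)\n", counter);
	return (0);
}
```

Build with `cc -pthread`. One deliberate simplification: the model leaves `MTX_CONTESTED` set after every contested release, whereas the kernel does a full `_release_lock_quick()` once `turnstile_signal()` reports that no waiters remain.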