/* kern_mutex.c revision 83366 */
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD: head/sys/kern/kern_mutex.c 83366 2001-09-12 08:38:13Z julian $
 */

/*
 * Machine independent bits of mutex implementation and implementation of
 * `witness' structure & related debugging routines.
 */

/*
 * Main Entry: witness
 * Pronunciation: 'wit-n&s
 * Function: noun
 * Etymology: Middle English witnesse, from Old English witnes knowledge,
 *    testimony, witness, from 2wit
 * Date: before 12th century
 * 1 : attestation of a fact or event : TESTIMONY
 * 2 : one that gives evidence; specifically : one who testifies in
 *    a cause or before a judicial tribunal
 * 3 : one asked to be present at a transaction so as to be able to
 *    testify to its having taken place
 * 4 : one who has personal knowledge of something
 * 5 a : something serving as evidence or proof : SIGN
 *   b : public affirmation by word or example of usually
 *    religious faith or conviction <the heroic witness to divine
 *    life -- Pilot>
 * 6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
/* True if nobody holds `m' (mtx_lock is the plain MTX_UNOWNED cookie). */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

/* Owning thread of `m', or NULL; flag bits are masked off the lock word. */
#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
			    : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/* Set the scheduling priority of thread `td' (stored on its ksegrp). */
#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

/*
 * Walk the chain of mutex owners starting at the mutex `td' is blocked on
 * and lend td's priority to each owner in turn (priority inheritance), so
 * a low-priority lock holder cannot indefinitely delay a high-priority
 * waiter.  Must be called with sched_lock held; lower numeric pri values
 * are higher priority.
 */
static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;	/* priority being propagated */
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		/* Advance to the current owner of the mutex. */
		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			    || td->td_proc->p_stat == SZOMB
			    || td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			/* Requeue so the new priority takes effect. */
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_flags(m, opts, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_flags(m, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_spin_flags(m, opts, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_spin_flags(m, opts, file, line);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'
We do NOT handle recursion here; we assume that 258151937Sjkim * if we're called, it's because we know we don't already own this lock. 259151937Sjkim */ 260100966Siwasakiint 261100966Siwasaki_mtx_trylock(struct mtx *m, int opts, const char *file, int line) 262100966Siwasaki{ 263100966Siwasaki int rval; 264100966Siwasaki 265100966Siwasaki MPASS(curthread != NULL); 266100966Siwasaki 267100966Siwasaki /* 268100966Siwasaki * _mtx_trylock does not accept MTX_NOSWITCH option. 269167802Sjkim */ 270100966Siwasaki KASSERT((opts & MTX_NOSWITCH) == 0, 271100966Siwasaki ("mtx_trylock() called with invalid option flag(s) %d", opts)); 272100966Siwasaki 273100966Siwasaki rval = _obtain_lock(m, curthread); 274100966Siwasaki 275100966Siwasaki LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line); 276100966Siwasaki if (rval) { 277100966Siwasaki /* 278100966Siwasaki * We do not handle recursion in _mtx_trylock; see the 279100966Siwasaki * note at the top of the routine. 280100966Siwasaki */ 281100966Siwasaki KASSERT(!mtx_recursed(m), 282100966Siwasaki ("mtx_trylock() called on a recursed mutex")); 283100966Siwasaki WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, 284100966Siwasaki file, line); 285100966Siwasaki } 286100966Siwasaki 287100966Siwasaki return (rval); 288100966Siwasaki} 289100966Siwasaki 290167802Sjkim/* 291100966Siwasaki * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. 292167802Sjkim * 293167802Sjkim * We call this if the lock is either contested (i.e. we need to go to 294167802Sjkim * sleep waiting for it), or if we need to recurse on it. 
 */
/*
 * Slow path for acquiring a sleep mutex: handles recursion by the current
 * owner and blocks the calling thread on the mutex's queue (sorted by
 * priority) until the lock can be obtained.  Priority is propagated to
 * the owner chain while we are blocked.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	/* We already own the lock: just count the recursion and return. */
	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			/* Take ownership, keeping the contested marker. */
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			/* Inherit the top waiter's priority if it is higher. */
			if (td1->td_ksegrp->kg_pri.pri_level <
			    kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
		    (void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex,
		 * keeping the queue sorted by priority (best first).
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			/* First waiter: record the mutex on the owner too. */
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level >
				    kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		/* Voluntary context switch; we resume when woken by unlock. */
		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
/*
 * Spin until the lock is obtained, dropping to the caller's saved critical
 * section state (`mtx_crit') while busy-waiting so interrupts get a chance
 * to run; panics if the lock is held too long (watchdog against a lost
 * spin-lock holder).
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
		while (m->mtx_lock != MTX_UNOWNED) {
			/*
			 * NOTE(review): i is incremented by both tests, so
			 * these thresholds are approximate spin counts, not
			 * exact iteration limits.
			 */
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic(
			    "spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name,
				    (void *)m->mtx_lock);
		}
		/* Re-enter a critical section before retrying the acquire. */
		mtx_crit = critical_enter();
	}

	/* Remember the critical section state to restore on unlock. */
	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
/*
 * Slow path for releasing a sleep mutex: unwinds one recursion level if
 * recursed, otherwise hands the lock to (and wakes) the highest-priority
 * blocked thread, restores this thread's priority from any remaining
 * contested mutexes it holds, and may preempt to the woken thread.
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	/* Recursed: just drop one level; only clear the flag at level 0. */
	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	/* Head of the blocked queue is the best-priority waiter. */
	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		/* Last waiter: lock becomes free and leaves our list. */
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	/*
	 * Recompute our priority: the best priority among waiters of the
	 * contested mutexes we still hold, bounded by our native priority.
	 */
	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	/* Wake the chosen waiter. */
	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	/* Switch out to the woken thread if it now outranks us. */
	if ((opts & MTX_NOSWITCH) == 0 &&
	    td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		/* Involuntary context switch statistics. */
		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
564128212Snjl */ 565193267Sjkim 566193267Sjkim/* 567193267Sjkim * The backing function for the INVARIANTS-enabled mtx_assert() 568128212Snjl */ 569167802Sjkim#ifdef INVARIANT_SUPPORT 570167802Sjkimvoid 571167802Sjkim_mtx_assert(struct mtx *m, int what, const char *file, int line) 572167802Sjkim{ 573167802Sjkim 574167802Sjkim if (panicstr != NULL) 575167802Sjkim return; 576167802Sjkim switch (what) { 577167802Sjkim case MA_OWNED: 578128212Snjl case MA_OWNED | MA_RECURSED: 579167802Sjkim case MA_OWNED | MA_NOTRECURSED: 580167802Sjkim if (!mtx_owned(m)) 581129684Snjl panic("mutex %s not owned at %s:%d", 582128212Snjl m->mtx_object.lo_name, file, line); 583128212Snjl if (mtx_recursed(m)) { 584128212Snjl if ((what & MA_NOTRECURSED) != 0) 585128212Snjl panic("mutex %s recursed at %s:%d", 586100966Siwasaki m->mtx_object.lo_name, file, line); 587100966Siwasaki } else if ((what & MA_RECURSED) != 0) { 588100966Siwasaki panic("mutex %s unrecursed at %s:%d", 589100966Siwasaki m->mtx_object.lo_name, file, line); 590100966Siwasaki } 591100966Siwasaki break; 592100966Siwasaki case MA_NOTOWNED: 593151937Sjkim if (mtx_owned(m)) 594151937Sjkim panic("mutex %s owned at %s:%d", 595151937Sjkim m->mtx_object.lo_name, file, line); 596151937Sjkim break; 597100966Siwasaki default: 598167802Sjkim panic("unknown mtx_assert at %s:%d", file, line); 599167802Sjkim } 600100966Siwasaki} 601100966Siwasaki#endif 602167802Sjkim 603167802Sjkim/* 604167802Sjkim * The MUTEX_DEBUG-enabled mtx_validate() 605167802Sjkim * 606167802Sjkim * Most of these checks have been moved off into the LO_INITIALIZED flag 607167802Sjkim * maintained by the witness code. 
608167802Sjkim */ 609167802Sjkim#ifdef MUTEX_DEBUG 610167802Sjkim 611167802Sjkimvoid mtx_validate __P((struct mtx *)); 612167802Sjkim 613167802Sjkimvoid 614167802Sjkimmtx_validate(struct mtx *m) 615167802Sjkim{ 616100966Siwasaki 617100966Siwasaki/* 618100966Siwasaki * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly 619100966Siwasaki * we can re-enable the kernacc() checks. 620167802Sjkim */ 621167802Sjkim#ifndef __alpha__ 622167802Sjkim /* 623167802Sjkim * Can't call kernacc() from early init386(), especially when 624167802Sjkim * initializing Giant mutex, because some stuff in kernacc() 625100966Siwasaki * requires Giant itself. 626100966Siwasaki */ 627100966Siwasaki if (!cold) 628100966Siwasaki if (!kernacc((caddr_t)m, sizeof(m), 629100966Siwasaki VM_PROT_READ | VM_PROT_WRITE)) 630100966Siwasaki panic("Can't read and write to mutex %p", m); 631117521Snjl#endif 632117521Snjl} 633100966Siwasaki#endif 634100966Siwasaki 635102550Siwasaki/* 636100966Siwasaki * Mutex initialization routine; initialize lock `m' of type contained in 637100966Siwasaki * `opts' with options contained in `opts' and description `description.' 
638100966Siwasaki */ 639100966Siwasakivoid 640100966Siwasakimtx_init(struct mtx *m, const char *description, int opts) 641100966Siwasaki{ 642100966Siwasaki struct lock_object *lock; 643100966Siwasaki 644100966Siwasaki MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | 645117521Snjl MTX_SLEEPABLE | MTX_NOWITNESS)) == 0); 646117521Snjl 647100966Siwasaki#ifdef MUTEX_DEBUG 648100966Siwasaki /* Diagnostic and error correction */ 649100966Siwasaki mtx_validate(m); 650100966Siwasaki#endif 651100966Siwasaki 652100966Siwasaki bzero(m, sizeof(*m)); 653100966Siwasaki lock = &m->mtx_object; 654100966Siwasaki if (opts & MTX_SPIN) 655100966Siwasaki lock->lo_class = &lock_class_mtx_spin; 656100966Siwasaki else 657100966Siwasaki lock->lo_class = &lock_class_mtx_sleep; 658100966Siwasaki lock->lo_name = description; 659100966Siwasaki if (opts & MTX_QUIET) 660100966Siwasaki lock->lo_flags = LO_QUIET; 661100966Siwasaki if (opts & MTX_RECURSE) 662100966Siwasaki lock->lo_flags |= LO_RECURSABLE; 663100966Siwasaki if (opts & MTX_SLEEPABLE) 664100966Siwasaki lock->lo_flags |= LO_SLEEPABLE; 665100966Siwasaki if ((opts & MTX_NOWITNESS) == 0) 666100966Siwasaki lock->lo_flags |= LO_WITNESS; 667100966Siwasaki 668100966Siwasaki m->mtx_lock = MTX_UNOWNED; 669193267Sjkim TAILQ_INIT(&m->mtx_blocked); 670100966Siwasaki 671100966Siwasaki LOCK_LOG_INIT(lock, opts); 672100966Siwasaki 673100966Siwasaki WITNESS_INIT(lock); 674100966Siwasaki} 675100966Siwasaki 676100966Siwasaki/* 677100966Siwasaki * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be 678100966Siwasaki * passed in as a flag here because if the corresponding mtx_init() was 679100966Siwasaki * called with MTX_QUIET set, then it will already be set in the mutex's 680100966Siwasaki * flags. 
681100966Siwasaki */ 682100966Siwasakivoid 683100966Siwasakimtx_destroy(struct mtx *m) 684100966Siwasaki{ 685100966Siwasaki 686100966Siwasaki LOCK_LOG_DESTROY(&m->mtx_object, 0); 687100966Siwasaki 688100966Siwasaki if (!mtx_owned(m)) 689100966Siwasaki MPASS(mtx_unowned(m)); 690100966Siwasaki else { 691100966Siwasaki MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); 692100966Siwasaki 693100966Siwasaki /* Tell witness this isn't locked to make it happy. */ 694100966Siwasaki WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH, 695151937Sjkim __FILE__, __LINE__); 696100966Siwasaki } 697100966Siwasaki 698100966Siwasaki WITNESS_DESTROY(&m->mtx_object); 699100966Siwasaki} 700100966Siwasaki