kern_rwlock.c revision 177843
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 177843 2008-04-01 20:31:55Z attilio $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Returns whether the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)
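/*
 * Illustrative sketch, not compiled into the kernel: the entire lock state
 * lives in the single rw_lock word decoded by the macros above.  With
 * RW_LOCK_READ clear the word carries the owning thread pointer (recovered
 * via RW_OWNER()); with it set the word carries a reader count plus waiter
 * flags.  The helper name below is hypothetical.
 */
#if 0
static void
rw_describe(struct rwlock *rw)
{
	uintptr_t v = rw->rw_lock;

	if (v & RW_LOCK_READ)
		/* Read mode: no single owner, just a count of readers. */
		printf("%s: read-locked, %ju reader(s)\n",
		    rw->lock_object.lo_name, (uintmax_t)RW_READERS(v));
	else
		/* Write mode: the word encodes the owning thread. */
		printf("%s: write-locked by thread %p\n",
		    rw->lock_object.lo_name, (void *)rw_wowner(rw));
}
#endif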
#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

	rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | RA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);

	flags = LO_UPGRADABLE | LO_RECURSABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	flags |= opts & RW_RECURSE;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}
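/*
 * Illustrative sketch, not compiled into the kernel: typical consumer use
 * of the initialization and lock/unlock KPI above.  The "frob" structure
 * and its fields are hypothetical.
 */
#if 0
struct frob {
	struct rwlock	f_lock;		/* protects f_count */
	int		f_count;
};

static void
frob_init(struct frob *fp)
{

	rw_init(&fp->f_lock, "frob lock");
}

static int
frob_read(struct frob *fp)
{
	int count;

	rw_rlock(&fp->f_lock);		/* shared: many readers at once */
	count = fp->f_count;
	rw_runlock(&fp->f_lock);
	return (count);
}

static void
frob_bump(struct frob *fp)
{

	rw_wlock(&fp->f_lock);		/* exclusive: a single writer */
	fp->f_count++;
	rw_wunlock(&fp->f_lock);
}
#endif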
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
	int rval;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) && (rw->lock_object.lo_flags & RW_RECURSE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read, in
 * order to prevent deadlock from reader recursion.  Also succeeds if the
 * lock is unlocked or read-locked with no writer waiters or spinners.
 * Failing in every other case prioritizes writers over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
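/*
 * Illustrative sketch, not compiled into the kernel: a few lock words run
 * through RW_CAN_READ() above, assuming the calling thread holds no read
 * locks (td_rw_rlocks == 0) except where noted.
 */
#if 0
static void
rw_can_read_examples(void)
{

	/* An unlocked lock is encoded as a read lock with no waiters. */
	MPASS(RW_CAN_READ(RW_UNLOCKED));
	/* Readers may join a read-locked lock with no writer waiters. */
	MPASS(RW_CAN_READ(RW_READERS_LOCK(2)));
	/*
	 * A queued or spinning writer turns new readers away, unless
	 * curthread already holds read locks (td_rw_rlocks != 0), in
	 * which case the first half of RW_CAN_READ() admits it to avoid
	 * deadlock on reader recursion.
	 */
	MPASS(!RW_CAN_READ(RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));
}
#endif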
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				while ((struct thread *)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present;
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it, drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	lock_profile_obtain_lock_success(&rw->lock_object, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}
int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
	uintptr_t x;

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
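/*
 * Illustrative sketch, not compiled into the kernel: the try variants
 * above let code that must not sleep attempt the lock and back off on
 * failure, reusing the hypothetical "frob" structure from the earlier
 * sketch.
 */
#if 0
static int
frob_bump_nowait(struct frob *fp)
{

	if (!rw_try_wlock(&fp->f_lock))
		return (EWOULDBLOCK);	/* contended; caller retries later */
	fp->f_count++;
	rw_wunlock(&fp->f_lock);
	return (0);
}
#endif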
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x, v, queue;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}
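/*
 * Illustrative sketch, not compiled into the kernel: the wakeup choice the
 * last reader just made above, restated as a pure function.  Writers are
 * woken in preference to readers, and any still-queued readers stay
 * recorded in the new lock word.  The function name is hypothetical.
 */
#if 0
static int
rw_release_queue(uintptr_t v, uintptr_t *newp)
{

	if (v & RW_LOCK_WRITE_WAITERS) {
		/* Wake the writers; keep the read-waiters bit, if any. */
		*newp = RW_UNLOCKED | (v & RW_LOCK_READ_WAITERS);
		return (TS_EXCLUSIVE_QUEUE);
	}
	/* Only readers are waiting; wake them all. */
	*newp = RW_UNLOCKED;
	return (TS_SHARED_QUEUE);
}
#endif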
/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uint64_t waittime = 0;
	uintptr_t v, x;
	int contested = 0;

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < 100) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					cpu_spinwait();
					continue;
				}
			}
			spintries++;
			for (i = 100000; i > 0; i--) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			if (i)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif
		/*
		 * If the lock was released while waiting for the turnstile
		 * chain lock, retry.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
	lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
	    file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}
	v = rw->rw_lock;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

	MPASS(ts != NULL);

	/*
	 * Prefer waking up writers over readers; this mirrors the reader
	 * side, where RW_CAN_READ() makes new readers queue behind any
	 * waiting writer.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  In the case of
	 * both readers and writers waiting, we wake up the writers and
	 * leave the RW_LOCK_READ_WAITERS flag set so the queued readers
	 * are woken on a later release.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}
	return (success);
}
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving the waiters flags
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
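/*
 * Illustrative sketch, not compiled into the kernel: rw_try_upgrade() and
 * rw_downgrade() used together in the classic read-mostly update pattern,
 * reusing the hypothetical "frob" structure from the earlier sketch.
 */
#if 0
static void
frob_update(struct frob *fp)
{

	rw_rlock(&fp->f_lock);
	/* ... look up state under the read lock ... */
	if (!rw_try_upgrade(&fp->f_lock)) {
		/*
		 * Other readers or waiters blocked the upgrade.  Drop
		 * the lock, take it exclusively, and revalidate: the
		 * state may have changed while the lock was released.
		 */
		rw_runlock(&fp->f_lock);
		rw_wlock(&fp->f_lock);
		/* ... revalidate the lookup ... */
	}
	fp->f_count++;
	rw_downgrade(&fp->f_lock);	/* keep reading, no unlock window */
	/* ... read-only work other readers may share ... */
	rw_runlock(&fp->f_lock);
}
#endif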
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
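/*
 * Illustrative sketch, not compiled into the kernel: callers commonly use
 * rw_assert() to document and enforce a locking contract at function
 * entry; hypothetical "frob" structure again.
 */
#if 0
static void
frob_bump_locked(struct frob *fp)
{

	/* The caller must hold the write lock (panics under INVARIANTS). */
	rw_assert(&fp->f_lock, RA_WLOCKED);
	fp->f_count++;
}
#endif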
#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}
#endif