kern_rwlock.c revision 167801
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 167801 2007-03-22 16:09:23Z jhb $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void     db_show_rwlock(struct lock_object *lock);
#endif
static void     lock_rw(struct lock_object *lock, int how);
static int      unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
        .lc_name = "rw",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
        .lc_lock = lock_rw,
        .lc_unlock = unlock_rw,
};
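
/*
 * Overview of the lock word, as a non-authoritative sketch (sys/rwlock.h
 * holds the real definitions; the flag names below are real, their exact
 * bit values are not shown).  All lock state lives in the single rw_lock
 * word so that it can be updated with one atomic compare-and-set:
 *
 *	RW_LOCK_READ		lock is read-locked (or fully unlocked)
 *	RW_LOCK_READ_WAITERS	readers are blocked on the turnstile
 *	RW_LOCK_WRITE_WAITERS	writers are blocked on the turnstile
 *
 * When RW_LOCK_READ is clear, the remaining bits hold the owning thread
 * pointer, extracted by RW_OWNER().  When it is set, they hold the count
 * of readers, extracted by RW_READERS().  A completely unlocked lock is
 * encoded as a read lock with zero readers and no waiters.
 */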

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_profile_object_init(&rw->lock_object, &lock_class_rw, name);
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	lock_profile_object_destroy(&rw->lock_object);
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
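
/*
 * Consumer-side usage, as a minimal sketch (the rw_wlock() and similar
 * macros in sys/rwlock.h expand to the underscore-prefixed functions in
 * this file; "foo_lock" and "foo_data" are hypothetical names):
 *
 *	struct rwlock foo_lock;
 *
 *	rw_init(&foo_lock, "foo");
 *
 *	rw_wlock(&foo_lock);		exclusive (write) access
 *	foo_data = compute();
 *	rw_wunlock(&foo_lock);
 *
 *	rw_rlock(&foo_lock);		shared (read) access
 *	use(foo_data);
 *	rw_runlock(&foo_lock);
 */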

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, 0, file, line);
	lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t x;

	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				if (RW_READERS(x) == 0)
					lock_profile_obtain_lock_success(
					    &rw->lock_object, contested,
					    waittime, file, line);
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
		    &waittime);

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		turnstile_lock(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set then try to set it.  If we fail to set it
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_release(&rw->lock_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->lock_object);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->lock_object, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
}
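
/*
 * Read-acquire state transitions, summarized as an illustrative sketch
 * (reader counts shown symbolically; the actual encoding comes from
 * RW_ONE_READER and friends in sys/rwlock.h):
 *
 *	unlocked	--cmpset-->  read(1)
 *	read(n)		--cmpset-->  read(n + 1)
 *	write-locked	set RW_LOCK_READ_WAITERS, then either spin
 *			(owner on CPU, ADAPTIVE_RWLOCKS) or block on
 *			the turnstile's shared queue.
 */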

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above.)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_lock(&rw->lock_object);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_release(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}
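
/*
 * The three release cases above, condensed into an illustrative sketch:
 *
 *	read(n), n > 1:		cmpset to read(n - 1) and return.
 *	read(1), no waiters:	cmpset to RW_UNLOCKED and return.
 *	read(1), write waiters:	take the turnstile chain lock, cmpset to
 *				RW_UNLOCKED, then broadcast to the
 *				exclusive queue so the waiting writers
 *				race for the lock.
 */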

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->lock_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers waiting still.  If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, rw);
				break;
			}
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->lock_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->lock_object);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->lock_object, rw_owner(rw),
		    TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}
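
/*
 * For context, the fast path that falls back to _rw_wlock_hard() lives in
 * sys/rwlock.h.  A simplified sketch of the idea (the header is
 * authoritative; this is not a verbatim copy):
 *
 *	_rw_write_lock(rw, tid):
 *		return (atomic_cmpset_acq_ptr(&(rw)->rw_lock,
 *		    RW_UNLOCKED, (tid)));
 *
 * i.e., an uncontested write acquire is a single compare-and-set of the
 * lock word from the unlocked value to the owning thread pointer.
 */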

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_release(&rw->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up
	 * shared waiters if we have any over writers.  This is probably
	 * not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
	 * set, there might not be any actual writers on the turnstile
	 * as they might all be spinning.  In that case, we don't want
	 * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
	 * is going to go away once we wakeup all the readers.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef ADAPTIVE_RWLOCKS
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * We have to make sure that we actually have waiters to
	 * wakeup.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}
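
/*
 * Worked example of the wakeup policy above (illustrative): a writer
 * releases while both waiter bits are set and both turnstile queues are
 * populated.  The shared queue is chosen, v is left as
 * RW_UNLOCKED | RW_LOCK_WRITE_WAITERS, and the readers are awakened.
 * Any writer that sneaks in before a reader runs will match the
 * RW_UNLOCKED | RW_LOCK_WRITE_WAITERS case in _rw_wlock_hard() and
 * claim both the lock and the turnstile.
 */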

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	int success;

	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_acq_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1), tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	turnstile_lock(&rw->lock_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.  In the ADAPTIVE_RWLOCKS case
	 * it is possible for there to not be an associated turnstile
	 * even though there are waiters if all of the waiters are
	 * spinning.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
#ifdef ADAPTIVE_RWLOCKS
	if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
#else
	if (success && v)
#endif
		turnstile_claim(&rw->lock_object);
	else
		turnstile_release(&rw->lock_object);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}
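
/*
 * Typical try-upgrade pattern from a consumer's point of view, as a
 * hypothetical sketch ("foo_lock" is a made-up name; rw_try_upgrade()
 * is the sys/rwlock.h wrapper for the function above):
 *
 *	rw_rlock(&foo_lock);
 *	...
 *	if (!rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		... re-validate: another writer may have run in between ...
 *	}
 *	... write-locked either way ...
 *	rw_wunlock(&foo_lock);
 */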

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	_rw_assert(rw, RA_WLOCKED, file, line);

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_lock(&rw->lock_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
	 * all of the read waiters might be spinning.  In that case,
	 * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
	 * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
	 * writer is blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
#ifdef ADAPTIVE_RWLOCKS
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef ADAPTIVE_RWLOCKS
	else if (ts == NULL)
		turnstile_release(&rw->lock_object);
#endif
	else
		turnstile_disown(ts);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
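
/*
 * Downgrade usage, as a hypothetical sketch (rw_downgrade() is the
 * sys/rwlock.h wrapper; "foo_lock" and "foo_data" are made-up names).
 * Handy when a short update phase is followed by a longer read-only
 * phase and other readers should be let in early:
 *
 *	rw_wlock(&foo_lock);
 *	foo_data = compute();
 *	rw_downgrade(&foo_lock);	now one reader among many
 *	use(foo_data);
 *	rw_runlock(&foo_lock);
 */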

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | LA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif