kern_rwlock.c revision 173733
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 173733 2007-11-18 14:43:53Z attilio $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void     db_show_rwlock(struct lock_object *lock);
#endif
static void     assert_rw(struct lock_object *lock, int what);
static void     lock_rw(struct lock_object *lock, int how);
static int      unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
        .lc_name = "rw",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_rw,
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
        .lc_lock = lock_rw,
        .lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define rw_wowner(rw)                                                   \
        ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
            (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Returns whether a write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define rw_recursed(rw)         ((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define rw_wlocked(rw)          (rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define rw_owner(rw)            rw_wowner(rw)

#ifndef INVARIANTS
#define _rw_assert(rw, what, file, line)
#endif

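/*
 * Illustrative sketch of how the macros above decode the rw_lock word
 * (the authoritative encoding lives in <sys/rwlock.h>; this is only a
 * reading aid, and "x", "nreaders", and "owner" are hypothetical
 * locals): when RW_LOCK_READ is set the word holds a reader count,
 * with "unlocked" encoded as a read lock with zero readers; otherwise
 * the word holds the owning thread pointer plus flag bits.
 *
 *      uintptr_t x = rw->rw_lock;
 *
 *      if (x & RW_LOCK_READ)
 *              nreaders = RW_READERS(x);       (read-locked or unlocked)
 *      else
 *              owner = (struct thread *)RW_OWNER(x);   (write-locked)
 */
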
void
assert_rw(struct lock_object *lock, int what)
{

        rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        if (how)
                rw_wlock(rw);
        else
                rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
        if (rw->rw_lock & RW_LOCK_READ) {
                rw_runlock(rw);
                return (0);
        } else {
                rw_wunlock(rw);
                return (1);
        }
}

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
        int flags;

        MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
            RW_RECURSE)) == 0);

        flags = LO_UPGRADABLE | LO_RECURSABLE;
        if (opts & RW_DUPOK)
                flags |= LO_DUPOK;
        if (opts & RW_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & RW_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & RW_QUIET)
                flags |= LO_QUIET;
        flags |= opts & RW_RECURSE;

        rw->rw_lock = RW_UNLOCKED;
        rw->rw_recurse = 0;
        lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

        KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
        KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
        rw->rw_lock = RW_DESTROYED;
        lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
        struct rw_args *args = arg;

        rw_init(args->ra_rw, args->ra_desc);
}

int
rw_wowned(struct rwlock *rw)
{

        return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line);
        __rw_wlock(rw, curthread, file, line);
        LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
        WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
            line);
        if (!rw_recursed(rw))
                lock_profile_release_lock(&rw->lock_object);
        __rw_wunlock(rw, curthread, file, line);
}

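/*
 * Illustrative usage sketch of the public rw_*() macros from
 * <sys/rwlock.h> that wrap the functions above ("foo_lock" and
 * "foo_data" are hypothetical names):
 *
 *      struct rwlock foo_lock;
 *
 *      rw_init(&foo_lock, "foo");
 *      rw_wlock(&foo_lock);
 *      foo_data = compute();           (exclusive access for the writer)
 *      rw_wunlock(&foo_lock);
 *      rw_rlock(&foo_lock);
 *      use(foo_data);                  (readers may run concurrently)
 *      rw_runlock(&foo_lock);
 *      rw_destroy(&foo_lock);
 *
 * A lock that must support recursive write locking would instead be
 * initialized with rw_init_flags(&foo_lock, "foo", RW_RECURSE).
 */
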
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING_SHARED
        uint64_t waittime = 0;
        int contested = 0;
#endif
        uintptr_t x;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
        KASSERT(rw_wowner(rw) != curthread,
            ("%s (%s): wlock already held @ %s:%d", __func__,
            rw->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

        /*
         * Note that we don't make any attempt to block read
         * locks once a writer has blocked on the lock.  The reason is
         * that we currently allow for read locks to recurse and we
         * don't keep track of all the holders of read locks.  Thus, if
         * we were to block readers once a writer blocked and a reader
         * tried to recurse on their reader lock after a writer had
         * blocked we would end up in a deadlock since the reader would
         * be blocked on the writer, and the writer would be blocked
         * waiting for the reader to release its original read lock.
         */
        for (;;) {
                /*
                 * Handle the easy case.  If no other thread has a write
                 * lock, then try to bump up the count of read locks.  Note
                 * that we have to preserve the current state of the
                 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
                 * read lock, then rw_lock must have changed, so restart
                 * the loop.  Note that this handles the case of a
                 * completely unlocked rwlock since such a lock is encoded
                 * as a read lock with no waiters.
                 */
                x = rw->rw_lock;
                if (x & RW_LOCK_READ) {

                        /*
                         * The RW_LOCK_READ_WAITERS flag should only be set
                         * if another thread currently holds a write lock,
                         * and in that case RW_LOCK_READ should be clear.
                         */
                        MPASS((x & RW_LOCK_READ_WAITERS) == 0);
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
                            x + RW_ONE_READER)) {
#ifdef LOCK_PROFILING_SHARED
                                if (RW_READERS(x) == 0)
                                        lock_profile_obtain_lock_success(
                                            &rw->lock_object, contested,
                                            waittime, file, line);
#endif
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            rw, (void *)x,
                                            (void *)(x + RW_ONE_READER));
                                break;
                        }
                        cpu_spinwait();
                        continue;
                }

                /*
                 * Okay, now it's the hard case.  Some other thread already
                 * has a write lock, so acquire the turnstile lock so we can
                 * begin the process of blocking.
                 */
                ts = turnstile_trywait(&rw->lock_object);

                /*
                 * The lock might have been released while we spun, so
                 * recheck its state and restart the loop if there is no
                 * longer a write lock.
                 */
                x = rw->rw_lock;
                if (x & RW_LOCK_READ) {
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }

                /*
                 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
                 * flag is already set, then we can go ahead and block.  If
                 * it is not set then try to set it.  If we fail to set it
                 * drop the turnstile lock and restart the loop.
                 */
                if (!(x & RW_LOCK_READ_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, x,
                            x | RW_LOCK_READ_WAITERS)) {
                                turnstile_cancel(ts);
                                cpu_spinwait();
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                    __func__, rw);
                }

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                owner = (struct thread *)RW_OWNER(x);
                if (TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
#ifdef LOCK_PROFILING_SHARED
                        lock_profile_obtain_lock_failed(&rw->lock_object,
                            &contested, &waittime);
#endif
                        while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
                            TD_IS_RUNNING(owner))
                                cpu_spinwait();
                        continue;
                }
#endif

                /*
                 * We were unable to acquire the lock and the read waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
#ifdef LOCK_PROFILING_SHARED
                lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
                    &waittime);
#endif
                turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
        }

        /*
         * TODO: acquire "owner of record" here.  Here be turnstile dragons
         * however.  turnstiles don't like owners changing between calls to
         * turnstile_wait() currently.
         */

        LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rw->lock_object, 0, file, line);
        curthread->td_locks++;
}

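/*
 * Because read locks may recurse (see the comment at the top of
 * _rw_rlock() above), a sketch like the following is legal even if a
 * writer blocked in between; the names are hypothetical:
 *
 *      rw_rlock(&foo_lock);
 *      foo_helper(sc);                 (foo_helper() internally takes its
 *                                       own rw_rlock()/rw_runlock() pair
 *                                       on foo_lock)
 *      rw_runlock(&foo_lock);
 */
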
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t x;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

        /* TODO: drop "owner of record" here. */

        for (;;) {
                /*
                 * See if there is more than one read lock held.  If so,
                 * just drop one and return.
                 */
                x = rw->rw_lock;
                if (RW_READERS(x) > 1) {
                        if (atomic_cmpset_ptr(&rw->rw_lock, x,
                            x - RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, rw, (void *)x,
                                            (void *)(x - RW_ONE_READER));
                                break;
                        }
                        continue;
                }

                /*
                 * We should never have read waiters while at least one
                 * thread holds a read lock.  (See note above)
                 */
                KASSERT(!(x & RW_LOCK_READ_WAITERS),
                    ("%s: waiting readers", __func__));
#ifdef LOCK_PROFILING_SHARED
                lock_profile_release_lock(&rw->lock_object);
#endif

                /*
                 * If there aren't any waiters for a write lock, then try
                 * to drop it quickly.
                 */
                if (!(x & RW_LOCK_WRITE_WAITERS)) {

                        /*
                         * There shouldn't be any flags set and we should
                         * be the only read lock.  If we fail to release
                         * the single read lock, then another thread might
                         * have just acquired a read lock, so go back up
                         * to the multiple read locks case.
                         */
                        MPASS(x == RW_READERS_LOCK(1));
                        if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
                            RW_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, rw);
                                break;
                        }
                        continue;
                }

                /*
                 * There should just be one reader with one or more
                 * writers waiting.
                 */
                MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

                /*
                 * Ok, we know we have a waiting writer and we think we
                 * are the last reader, so grab the turnstile lock.
                 */
                turnstile_chain_lock(&rw->lock_object);

                /*
                 * Try to drop our lock leaving the lock in an unlocked
                 * state.
                 *
                 * If you wanted to do explicit lock handoff you'd have to
                 * do it here.  You'd also want to use turnstile_signal()
                 * and you'd have to handle the race where a higher
                 * priority thread blocks on the write lock before the
                 * thread you wake up actually runs and have the new thread
                 * "steal" the lock.  For now it's a lot simpler to just
                 * wake up all of the waiters.
                 *
                 * As above, if we fail, then another thread might have
                 * acquired a read lock, so drop the turnstile lock and
                 * restart.
                 */
                if (!atomic_cmpset_ptr(&rw->rw_lock,
                    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
                        turnstile_chain_unlock(&rw->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
                            __func__, rw);

                /*
                 * Ok.  The lock is released and all that's left is to
                 * wake up the waiters.  Note that the lock might not be
                 * free anymore, but in that case the writers will just
                 * block again if they run before the new lock holder(s)
                 * release the lock.
                 */
                ts = turnstile_lookup(&rw->lock_object);
                MPASS(ts != NULL);
                turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_SHARED_LOCK);
                turnstile_chain_unlock(&rw->lock_object);
                break;
        }
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
#endif
        uint64_t waittime = 0;
        uintptr_t v;
        int contested = 0;

        if (rw_wlocked(rw)) {
                KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
                    __func__, rw->lock_object.lo_name, file, line));
                rw->rw_recurse++;
                atomic_set_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
                return;
        }

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

        while (!_rw_write_lock(rw, tid)) {
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;

                /*
                 * If the lock was released while spinning on the
                 * turnstile chain lock, try again.
                 */
                if (v == RW_UNLOCKED) {
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }

                /*
                 * If the lock was released by a writer with both readers
                 * and writers waiting and a reader hasn't woken up and
                 * acquired the lock yet, rw_lock will be set to the
                 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
                 * that value, try to acquire it once.  Note that we have
                 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
                 * other writers still waiting.  If we fail, restart the
                 * loop.
                 */
                if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock,
                            RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
                            tid | RW_LOCK_WRITE_WAITERS)) {
                                turnstile_claim(ts);
                                CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                                    __func__, rw);
                                break;
                        }
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }

                /*
                 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
                 * set it.  If we fail to set it, then loop back and try
                 * again.
                 */
                if (!(v & RW_LOCK_WRITE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_WRITE_WAITERS)) {
                                turnstile_cancel(ts);
                                cpu_spinwait();
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                    __func__, rw);
                }

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                owner = (struct thread *)RW_OWNER(v);
                if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
                        lock_profile_obtain_lock_failed(&rw->lock_object,
                            &contested, &waittime);
                        while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
                            TD_IS_RUNNING(owner))
                                cpu_spinwait();
                        continue;
                }
#endif

                /*
                 * We were unable to acquire the lock and the write waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
                lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
                    &waittime);
                turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
        }
        lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
            file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t v;
        int queue;

        if (rw_wlocked(rw) && rw_recursed(rw)) {
                if ((--rw->rw_recurse) == 0)
                        atomic_clear_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
                return;
        }

        KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
            ("%s: neither of the waiter flags are set", __func__));

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

        turnstile_chain_lock(&rw->lock_object);
        ts = turnstile_lookup(&rw->lock_object);

#ifdef ADAPTIVE_RWLOCKS
        /*
         * There might not be a turnstile for this lock if all of
         * the waiters are adaptively spinning.  In that case, just
         * reset the lock to the unlocked state and return.
         */
        if (ts == NULL) {
                atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
                turnstile_chain_unlock(&rw->lock_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif

        /*
         * Use the same algorithm as sx locks for now.  Prefer waking up
         * shared waiters if we have any over writers.  This is probably not
         * ideal.
         *
         * 'v' is the value we are going to write back to rw_lock.  If we
         * have waiters on both queues, we need to preserve the state of
         * the waiter flag for the queue we don't wake up.  For now this is
         * hardcoded for the algorithm mentioned above.
         *
         * In the case of both readers and writers waiting we wake up the
         * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
         * new writer comes in before a reader it will claim the lock up
         * above.  There is probably a potential priority inversion in
         * there that could be worked around either by waking both queues
         * of waiters or doing some complicated lock handoff gymnastics.
         *
         * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
         * set, there might not be any actual writers on the turnstile
         * as they might all be spinning.  In that case, we don't want
         * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
         * is going to go away once we wake up all the readers.
         */
        v = RW_UNLOCKED;
        if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
                queue = TS_SHARED_QUEUE;
#ifdef ADAPTIVE_RWLOCKS
                if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
                    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
                        v |= RW_LOCK_WRITE_WAITERS;
#else
                v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
        } else
                queue = TS_EXCLUSIVE_QUEUE;

#ifdef ADAPTIVE_RWLOCKS
        /*
         * We have to make sure that we actually have waiters to
         * wake up.  If they are all spinning, then we just need to
         * disown the turnstile and return.
         */
        if (turnstile_empty(ts, queue)) {
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
                atomic_store_rel_ptr(&rw->rw_lock, v);
                turnstile_disown(ts);
                turnstile_chain_unlock(&rw->lock_object);
                return;
        }
#endif

        /* Wake up all waiters for the specific queue. */
        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
                    queue == TS_SHARED_QUEUE ? "read" : "write");
        turnstile_broadcast(ts, queue);
        atomic_store_rel_ptr(&rw->rw_lock, v);
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
        uintptr_t v, tid;
        struct turnstile *ts;
        int success;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);

        /*
         * Attempt to switch from one reader to a writer.  If there
         * are any write waiters, then we will have to lock the
         * turnstile first to prevent races with another writer
         * calling turnstile_wait() before we have claimed this
         * turnstile.  So, do the simple case of no waiters first.
         */
        tid = (uintptr_t)curthread;
        if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
                success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
                    tid);
                goto out;
        }

        /*
         * Ok, we think we have write waiters, so lock the
         * turnstile.
         */
        ts = turnstile_trywait(&rw->lock_object);

        /*
         * Try to switch from one reader to a writer again.  This time
         * we honor the current state of the RW_LOCK_WRITE_WAITERS
         * flag.  If we obtain the lock with the flag set, then claim
         * ownership of the turnstile.  In the ADAPTIVE_RWLOCKS case
         * it is possible for there to not be an associated turnstile
         * even though there are waiters if all of the waiters are
         * spinning.
         */
        v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
        success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
            tid | v);
#ifdef ADAPTIVE_RWLOCKS
        if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
#else
        if (success && v)
#endif
                turnstile_claim(ts);
        else
                turnstile_cancel(ts);
out:
        LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
        if (success)
                WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
        return (success);
}

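/*
 * A common pattern for rw_try_upgrade() (an illustrative sketch, not
 * code from this file; names are hypothetical): fall back to relocking
 * as a writer when the non-blocking upgrade fails, and revalidate,
 * since the lock was dropped in between:
 *
 *      rw_rlock(&foo_lock);
 *      if (!rw_try_upgrade(&foo_lock)) {
 *              rw_runlock(&foo_lock);
 *              rw_wlock(&foo_lock);
 *              (re-check the state read under the read lock here)
 *      }
 *      foo_data = compute();
 *      rw_wunlock(&foo_lock);
 */
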
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t tid, v;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (rw_recursed(rw))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

        /*
         * Convert from a writer to a single reader.  First we handle
         * the easy case with no waiters.  If there are any waiters, we
         * lock the turnstile, "disown" the lock, and awaken any read
         * waiters.
         */
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
                goto out;

        /*
         * Ok, we think we have waiters, so lock the turnstile so we can
         * read the waiter flags without any races.
         */
        turnstile_chain_lock(&rw->lock_object);
        v = rw->rw_lock;
        MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

        /*
         * Downgrade from a write lock while preserving
         * RW_LOCK_WRITE_WAITERS and give up ownership of the
         * turnstile.  If there are any read waiters, wake them up.
         *
         * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
         * all of the read waiters might be spinning.  In that case,
         * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
         * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
         * writer is blocked on the turnstile.
         */
        ts = turnstile_lookup(&rw->lock_object);
#ifdef ADAPTIVE_RWLOCKS
        if (ts == NULL)
                v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
        else if (v & RW_LOCK_READ_WAITERS &&
            turnstile_empty(ts, TS_SHARED_QUEUE))
                v &= ~RW_LOCK_READ_WAITERS;
        else if (v & RW_LOCK_WRITE_WAITERS &&
            turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
                v &= ~RW_LOCK_WRITE_WAITERS;
#else
        MPASS(ts != NULL);
#endif
        if (v & RW_LOCK_READ_WAITERS)
                turnstile_broadcast(ts, TS_SHARED_QUEUE);
        atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
            (v & RW_LOCK_WRITE_WAITERS));
        if (v & RW_LOCK_READ_WAITERS)
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        else if (ts)
                turnstile_disown(ts);
        turnstile_chain_unlock(&rw->lock_object);
out:
        LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}

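/*
 * Illustrative use of rw_downgrade() (hypothetical names): perform an
 * update exclusively, then keep a read hold for the remaining work
 * without ever fully releasing the lock:
 *
 *      rw_wlock(&foo_lock);
 *      foo_data = compute();
 *      rw_downgrade(&foo_lock);
 *      use(foo_data);                  (other readers may now proceed)
 *      rw_runlock(&foo_lock);
 */
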
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If some other thread has a write lock or we have one
                 * and are asserting a read lock, fail.  Also, if no one
                 * has a lock at all, fail.
                 */
                if (rw->rw_lock == RW_UNLOCKED ||
                    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
                    rw_wowner(rw) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rw->lock_object.lo_name, (what == RA_RLOCKED) ?
                            "read " : "", file, line);

                if (!(rw->rw_lock & RW_LOCK_READ)) {
                        if (rw_recursed(rw)) {
                                if (what & RA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            rw->lock_object.lo_name, file,
                                            line);
                        } else if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                }
#endif
                break;
        case RA_WLOCKED:
        case RA_WLOCKED | RA_RECURSED:
        case RA_WLOCKED | RA_NOTRECURSED:
                if (rw_wowner(rw) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                if (rw_recursed(rw)) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If we hold a write lock, fail.  We can't reliably check
                 * to see if we hold a read lock or not.
                 */
                if (rw_wowner(rw) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

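/*
 * Illustrative rw_assert() use in a consumer, subject to the caveat
 * above about the non-WITNESS case ("foo_softc" and "sc_lock" are
 * hypothetical):
 *
 *      static void
 *      foo_modify(struct foo_softc *sc)
 *      {
 *
 *              rw_assert(&sc->sc_lock, RA_WLOCKED);
 *              ...
 *      }
 */
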
#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
        struct rwlock *rw;
        struct thread *td;

        rw = (struct rwlock *)lock;

        db_printf(" state: ");
        if (rw->rw_lock == RW_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (rw->rw_lock == RW_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (rw->rw_lock & RW_LOCK_READ)
                db_printf("RLOCK: %ju locks\n",
                    (uintmax_t)(RW_READERS(rw->rw_lock)));
        else {
                td = rw_wowner(rw);
                db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (rw_recursed(rw))
                        db_printf(" recursed: %u\n", rw->rw_recurse);
        }
        db_printf(" waiters: ");
        switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
        case RW_LOCK_READ_WAITERS:
                db_printf("readers\n");
                break;
        case RW_LOCK_WRITE_WAITERS:
                db_printf("writers\n");
                break;
        case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
                db_printf("readers and writers\n");
                break;
        default:
                db_printf("none\n");
                break;
        }
}

#endif