/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_rwlock.c 278694 2015-02-13 19:06:22Z sbruno $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
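
/*
 * Illustrative sketch (not part of the original file): how the cookie
 * scheme above recovers the containing lock.  The KPI hands consumers the
 * address of the rw_lock word only; __containerof() subtracts the member
 * offset to get back to the enclosing struct rwlock.  The function name
 * is hypothetical and the example is compiled out.
 */
#if 0
static struct rwlock *
example_cookie_to_rw(volatile uintptr_t *c)
{

	/* Equivalent to rwlock2rw(c). */
	return (__containerof(c, struct rwlock, rw_lock));
}
#endif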

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Returns whether a write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif
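
/*
 * Worked illustration of the lock word encoding consumed by the macros
 * above, derived from this file's own state transitions (not new
 * behavior):
 *
 *	RW_UNLOCKED		unlocked; encoded as a read lock with zero
 *				readers and no waiters
 *	RW_READERS_LOCK(1)	one reader; equal to RW_UNLOCKED plus
 *				RW_ONE_READER, which is how the read fast
 *				path below bumps the count
 *	(uintptr_t)td		write-locked by thread td; RW_LOCK_READ is
 *				clear, so rw_wowner() recovers td via
 *				RW_OWNER()
 *
 * Waiter and spinner state lives in the low flag bits
 * (RW_LOCK_READ_WAITERS, RW_LOCK_WRITE_WAITERS, RW_LOCK_WRITE_SPINNER)
 * and must be preserved across compare-and-set updates, as the
 * acquisition loops below take care to do.
 */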

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}
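
/*
 * Illustrative sketch (not part of the original file): the basic
 * write-side lifecycle of the KPI implemented above.  The lock and
 * function names are hypothetical and the example is compiled out.
 */
#if 0
static struct rwlock example_lock;

static void
example_lifecycle(void)
{

	rw_init(&example_lock, "example");

	/* Blocking write acquisition. */
	rw_wlock(&example_lock);
	/* ... modify data protected by example_lock ... */
	rw_wunlock(&example_lock);

	/* Non-blocking attempt; fails instead of sleeping. */
	if (rw_try_wlock(&example_lock)) {
		/* ... */
		rw_wunlock(&example_lock);
	}

	rw_destroy(&example_lock);
}
#endif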
("rw_wunlock() of destroyed rwlock @ %s:%d", file, line)); 319 __rw_assert(c, RA_WLOCKED, file, line); 320 WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); 321 LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, 322 line); 323 if (!rw_recursed(rw)) 324 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw); 325 __rw_wunlock(rw, curthread, file, line); 326 curthread->td_locks--; 327} 328/* 329 * Determines whether a new reader can acquire a lock. Succeeds if the 330 * reader already owns a read lock and the lock is locked for read to 331 * prevent deadlock from reader recursion. Also succeeds if the lock 332 * is unlocked and has no writer waiters or spinners. Failing otherwise 333 * prioritizes writers before readers. 334 */ 335#define RW_CAN_READ(_rw) \ 336 ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) & \ 337 (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) == \ 338 RW_LOCK_READ) 339 340void 341__rw_rlock(volatile uintptr_t *c, const char *file, int line) 342{ 343 struct rwlock *rw; 344 struct turnstile *ts; 345#ifdef ADAPTIVE_RWLOCKS 346 volatile struct thread *owner; 347 int spintries = 0; 348 int i; 349#endif 350#ifdef LOCK_PROFILING 351 uint64_t waittime = 0; 352 int contested = 0; 353#endif 354 uintptr_t v; 355#ifdef KDTRACE_HOOKS 356 uint64_t spin_cnt = 0; 357 uint64_t sleep_cnt = 0; 358 int64_t sleep_time = 0; 359#endif 360 361 if (SCHEDULER_STOPPED()) 362 return; 363 364 rw = rwlock2rw(c); 365 366 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 367 ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d", 368 curthread, rw->lock_object.lo_name, file, line)); 369 KASSERT(rw->rw_lock != RW_DESTROYED, 370 ("rw_rlock() of destroyed rwlock @ %s:%d", file, line)); 371 KASSERT(rw_wowner(rw) != curthread, 372 ("rw_rlock: wlock already held for %s @ %s:%d", 373 rw->lock_object.lo_name, file, line)); 374 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL); 375 376 for (;;) { 377#ifdef KDTRACE_HOOKS 378 spin_cnt++; 379#endif 380 /* 381 * Handle the easy case. If no other thread has a write 382 * lock, then try to bump up the count of read locks. Note 383 * that we have to preserve the current state of the 384 * RW_LOCK_WRITE_WAITERS flag. If we fail to acquire a 385 * read lock, then rw_lock must have changed, so restart 386 * the loop. Note that this handles the case of a 387 * completely unlocked rwlock since such a lock is encoded 388 * as a read lock with no waiters. 389 */ 390 v = rw->rw_lock; 391 if (RW_CAN_READ(v)) { 392 /* 393 * The RW_LOCK_READ_WAITERS flag should only be set 394 * if the lock has been unlocked and write waiters 395 * were present. 396 */ 397 if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, 398 v + RW_ONE_READER)) { 399 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 400 CTR4(KTR_LOCK, 401 "%s: %p succeed %p -> %p", __func__, 402 rw, (void *)v, 403 (void *)(v + RW_ONE_READER)); 404 break; 405 } 406 continue; 407 } 408#ifdef HWPMC_HOOKS 409 PMC_SOFT_CALL( , , lock, failed); 410#endif 411 lock_profile_obtain_lock_failed(&rw->lock_object, 412 &contested, &waittime); 413 414#ifdef ADAPTIVE_RWLOCKS 415 /* 416 * If the owner is running on another CPU, spin until 417 * the owner stops running or the state of the lock 418 * changes. 
419 */ 420 if ((v & RW_LOCK_READ) == 0) { 421 owner = (struct thread *)RW_OWNER(v); 422 if (TD_IS_RUNNING(owner)) { 423 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 424 CTR3(KTR_LOCK, 425 "%s: spinning on %p held by %p", 426 __func__, rw, owner); 427 KTR_STATE1(KTR_SCHED, "thread", 428 sched_tdname(curthread), "spinning", 429 "lockname:\"%s\"", rw->lock_object.lo_name); 430 while ((struct thread*)RW_OWNER(rw->rw_lock) == 431 owner && TD_IS_RUNNING(owner)) { 432 cpu_spinwait(); 433#ifdef KDTRACE_HOOKS 434 spin_cnt++; 435#endif 436 } 437 KTR_STATE0(KTR_SCHED, "thread", 438 sched_tdname(curthread), "running"); 439 continue; 440 } 441 } else if (spintries < rowner_retries) { 442 spintries++; 443 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 444 "spinning", "lockname:\"%s\"", 445 rw->lock_object.lo_name); 446 for (i = 0; i < rowner_loops; i++) { 447 v = rw->rw_lock; 448 if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v)) 449 break; 450 cpu_spinwait(); 451 } 452 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 453 "running"); 454 if (i != rowner_loops) 455 continue; 456 } 457#endif 458 459 /* 460 * Okay, now it's the hard case. Some other thread already 461 * has a write lock or there are write waiters present, 462 * acquire the turnstile lock so we can begin the process 463 * of blocking. 464 */ 465 ts = turnstile_trywait(&rw->lock_object); 466 467 /* 468 * The lock might have been released while we spun, so 469 * recheck its state and restart the loop if needed. 470 */ 471 v = rw->rw_lock; 472 if (RW_CAN_READ(v)) { 473 turnstile_cancel(ts); 474 continue; 475 } 476 477#ifdef ADAPTIVE_RWLOCKS 478 /* 479 * The current lock owner might have started executing 480 * on another CPU (or the lock could have changed 481 * owners) while we were waiting on the turnstile 482 * chain lock. If so, drop the turnstile lock and try 483 * again. 484 */ 485 if ((v & RW_LOCK_READ) == 0) { 486 owner = (struct thread *)RW_OWNER(v); 487 if (TD_IS_RUNNING(owner)) { 488 turnstile_cancel(ts); 489 continue; 490 } 491 } 492#endif 493 494 /* 495 * The lock is held in write mode or it already has waiters. 496 */ 497 MPASS(!RW_CAN_READ(v)); 498 499 /* 500 * If the RW_LOCK_READ_WAITERS flag is already set, then 501 * we can go ahead and block. If it is not set then try 502 * to set it. If we fail to set it drop the turnstile 503 * lock and restart the loop. 504 */ 505 if (!(v & RW_LOCK_READ_WAITERS)) { 506 if (!atomic_cmpset_ptr(&rw->rw_lock, v, 507 v | RW_LOCK_READ_WAITERS)) { 508 turnstile_cancel(ts); 509 continue; 510 } 511 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 512 CTR2(KTR_LOCK, "%s: %p set read waiters flag", 513 __func__, rw); 514 } 515 516 /* 517 * We were unable to acquire the lock and the read waiters 518 * flag is set, so we must block on the turnstile. 519 */ 520 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 521 CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, 522 rw); 523#ifdef KDTRACE_HOOKS 524 sleep_time -= lockstat_nsecs(); 525#endif 526 turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE); 527#ifdef KDTRACE_HOOKS 528 sleep_time += lockstat_nsecs(); 529 sleep_cnt++; 530#endif 531 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 532 CTR2(KTR_LOCK, "%s: %p resuming from turnstile", 533 __func__, rw); 534 } 535 536 /* 537 * TODO: acquire "owner of record" here. Here be turnstile dragons 538 * however. turnstiles don't like owners changing between calls to 539 * turnstile_wait() currently. 
540 */ 541 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested, 542 waittime, file, line); 543 LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line); 544 WITNESS_LOCK(&rw->lock_object, 0, file, line); 545 curthread->td_locks++; 546 curthread->td_rw_rlocks++; 547#ifdef KDTRACE_HOOKS 548 if (sleep_time) 549 LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time); 550 551 /* 552 * Record only the loops spinning and not sleeping. 553 */ 554 if (spin_cnt > sleep_cnt) 555 LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt)); 556#endif 557} 558 559int 560__rw_try_rlock(volatile uintptr_t *c, const char *file, int line) 561{ 562 struct rwlock *rw; 563 uintptr_t x; 564 565 if (SCHEDULER_STOPPED()) 566 return (1); 567 568 rw = rwlock2rw(c); 569 570 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 571 ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d", 572 curthread, rw->lock_object.lo_name, file, line)); 573 574 for (;;) { 575 x = rw->rw_lock; 576 KASSERT(rw->rw_lock != RW_DESTROYED, 577 ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line)); 578 if (!(x & RW_LOCK_READ)) 579 break; 580 if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) { 581 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, 582 line); 583 WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); 584 curthread->td_locks++; 585 curthread->td_rw_rlocks++; 586 return (1); 587 } 588 } 589 590 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line); 591 return (0); 592} 593 594void 595_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line) 596{ 597 struct rwlock *rw; 598 struct turnstile *ts; 599 uintptr_t x, v, queue; 600 601 if (SCHEDULER_STOPPED()) 602 return; 603 604 rw = rwlock2rw(c); 605 606 KASSERT(rw->rw_lock != RW_DESTROYED, 607 ("rw_runlock() of destroyed rwlock @ %s:%d", file, line)); 608 __rw_assert(c, RA_RLOCKED, file, line); 609 WITNESS_UNLOCK(&rw->lock_object, 0, file, line); 610 LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line); 611 612 /* TODO: drop "owner of record" here. */ 613 614 for (;;) { 615 /* 616 * See if there is more than one read lock held. If so, 617 * just drop one and return. 618 */ 619 x = rw->rw_lock; 620 if (RW_READERS(x) > 1) { 621 if (atomic_cmpset_rel_ptr(&rw->rw_lock, x, 622 x - RW_ONE_READER)) { 623 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 624 CTR4(KTR_LOCK, 625 "%s: %p succeeded %p -> %p", 626 __func__, rw, (void *)x, 627 (void *)(x - RW_ONE_READER)); 628 break; 629 } 630 continue; 631 } 632 /* 633 * If there aren't any waiters for a write lock, then try 634 * to drop it quickly. 635 */ 636 if (!(x & RW_LOCK_WAITERS)) { 637 MPASS((x & ~RW_LOCK_WRITE_SPINNER) == 638 RW_READERS_LOCK(1)); 639 if (atomic_cmpset_rel_ptr(&rw->rw_lock, x, 640 RW_UNLOCKED)) { 641 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 642 CTR2(KTR_LOCK, "%s: %p last succeeded", 643 __func__, rw); 644 break; 645 } 646 continue; 647 } 648 /* 649 * Ok, we know we have waiters and we think we are the 650 * last reader, so grab the turnstile lock. 651 */ 652 turnstile_chain_lock(&rw->lock_object); 653 v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); 654 MPASS(v & RW_LOCK_WAITERS); 655 656 /* 657 * Try to drop our lock leaving the lock in a unlocked 658 * state. 659 * 660 * If you wanted to do explicit lock handoff you'd have to 661 * do it here. 

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
}
766 */ 767 v = rw->rw_lock; 768 owner = (struct thread *)RW_OWNER(v); 769 if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) { 770 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 771 CTR3(KTR_LOCK, "%s: spinning on %p held by %p", 772 __func__, rw, owner); 773 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 774 "spinning", "lockname:\"%s\"", 775 rw->lock_object.lo_name); 776 while ((struct thread*)RW_OWNER(rw->rw_lock) == owner && 777 TD_IS_RUNNING(owner)) { 778 cpu_spinwait(); 779#ifdef KDTRACE_HOOKS 780 spin_cnt++; 781#endif 782 } 783 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 784 "running"); 785 continue; 786 } 787 if ((v & RW_LOCK_READ) && RW_READERS(v) && 788 spintries < rowner_retries) { 789 if (!(v & RW_LOCK_WRITE_SPINNER)) { 790 if (!atomic_cmpset_ptr(&rw->rw_lock, v, 791 v | RW_LOCK_WRITE_SPINNER)) { 792 continue; 793 } 794 } 795 spintries++; 796 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 797 "spinning", "lockname:\"%s\"", 798 rw->lock_object.lo_name); 799 for (i = 0; i < rowner_loops; i++) { 800 if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0) 801 break; 802 cpu_spinwait(); 803 } 804 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 805 "running"); 806#ifdef KDTRACE_HOOKS 807 spin_cnt += rowner_loops - i; 808#endif 809 if (i != rowner_loops) 810 continue; 811 } 812#endif 813 ts = turnstile_trywait(&rw->lock_object); 814 v = rw->rw_lock; 815 816#ifdef ADAPTIVE_RWLOCKS 817 /* 818 * The current lock owner might have started executing 819 * on another CPU (or the lock could have changed 820 * owners) while we were waiting on the turnstile 821 * chain lock. If so, drop the turnstile lock and try 822 * again. 823 */ 824 if (!(v & RW_LOCK_READ)) { 825 owner = (struct thread *)RW_OWNER(v); 826 if (TD_IS_RUNNING(owner)) { 827 turnstile_cancel(ts); 828 continue; 829 } 830 } 831#endif 832 /* 833 * Check for the waiters flags about this rwlock. 834 * If the lock was released, without maintain any pending 835 * waiters queue, simply try to acquire it. 836 * If a pending waiters queue is present, claim the lock 837 * ownership and maintain the pending queue. 838 */ 839 x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); 840 if ((v & ~x) == RW_UNLOCKED) { 841 x &= ~RW_LOCK_WRITE_SPINNER; 842 if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) { 843 if (x) 844 turnstile_claim(ts); 845 else 846 turnstile_cancel(ts); 847 break; 848 } 849 turnstile_cancel(ts); 850 continue; 851 } 852 /* 853 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to 854 * set it. If we fail to set it, then loop back and try 855 * again. 856 */ 857 if (!(v & RW_LOCK_WRITE_WAITERS)) { 858 if (!atomic_cmpset_ptr(&rw->rw_lock, v, 859 v | RW_LOCK_WRITE_WAITERS)) { 860 turnstile_cancel(ts); 861 continue; 862 } 863 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 864 CTR2(KTR_LOCK, "%s: %p set write waiters flag", 865 __func__, rw); 866 } 867 /* 868 * We were unable to acquire the lock and the write waiters 869 * flag is set, so we must block on the turnstile. 
870 */ 871 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 872 CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, 873 rw); 874#ifdef KDTRACE_HOOKS 875 sleep_time -= lockstat_nsecs(); 876#endif 877 turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE); 878#ifdef KDTRACE_HOOKS 879 sleep_time += lockstat_nsecs(); 880 sleep_cnt++; 881#endif 882 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 883 CTR2(KTR_LOCK, "%s: %p resuming from turnstile", 884 __func__, rw); 885#ifdef ADAPTIVE_RWLOCKS 886 spintries = 0; 887#endif 888 } 889 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested, 890 waittime, file, line); 891#ifdef KDTRACE_HOOKS 892 if (sleep_time) 893 LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time); 894 895 /* 896 * Record only the loops spinning and not sleeping. 897 */ 898 if (spin_cnt > sleep_cnt) 899 LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt)); 900#endif 901} 902 903/* 904 * This function is called if the first try at releasing a write lock failed. 905 * This means that one of the 2 waiter bits must be set indicating that at 906 * least one thread is waiting on this lock. 907 */ 908void 909__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file, 910 int line) 911{ 912 struct rwlock *rw; 913 struct turnstile *ts; 914 uintptr_t v; 915 int queue; 916 917 if (SCHEDULER_STOPPED()) 918 return; 919 920 rw = rwlock2rw(c); 921 922 if (rw_wlocked(rw) && rw_recursed(rw)) { 923 rw->rw_recurse--; 924 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 925 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw); 926 return; 927 } 928 929 KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS), 930 ("%s: neither of the waiter flags are set", __func__)); 931 932 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 933 CTR2(KTR_LOCK, "%s: %p contested", __func__, rw); 934 935 turnstile_chain_lock(&rw->lock_object); 936 ts = turnstile_lookup(&rw->lock_object); 937 MPASS(ts != NULL); 938 939 /* 940 * Use the same algo as sx locks for now. Prefer waking up shared 941 * waiters if we have any over writers. This is probably not ideal. 942 * 943 * 'v' is the value we are going to write back to rw_lock. If we 944 * have waiters on both queues, we need to preserve the state of 945 * the waiter flag for the queue we don't wake up. For now this is 946 * hardcoded for the algorithm mentioned above. 947 * 948 * In the case of both readers and writers waiting we wakeup the 949 * readers but leave the RW_LOCK_WRITE_WAITERS flag set. If a 950 * new writer comes in before a reader it will claim the lock up 951 * above. There is probably a potential priority inversion in 952 * there that could be worked around either by waking both queues 953 * of waiters or doing some complicated lock handoff gymnastics. 954 */ 955 v = RW_UNLOCKED; 956 if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) { 957 queue = TS_EXCLUSIVE_QUEUE; 958 v |= (rw->rw_lock & RW_LOCK_READ_WAITERS); 959 } else 960 queue = TS_SHARED_QUEUE; 961 962 /* Wake up all waiters for the specific queue. */ 963 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 964 CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw, 965 queue == TS_SHARED_QUEUE ? "read" : "write"); 966 turnstile_broadcast(ts, queue); 967 atomic_store_rel_ptr(&rw->rw_lock, v); 968 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 969 turnstile_chain_unlock(&rw->lock_object); 970} 971 972/* 973 * Attempt to do a non-blocking upgrade from a read lock to a write 974 * lock. This will only succeed if this thread holds a single read 975 * lock. 

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
	}
	return (success);
}
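
/*
 * Illustrative sketch (not part of the original file): the usual upgrade
 * pattern built on __rw_try_upgrade() above.  Because the upgrade is
 * non-blocking and can fail, callers must be prepared to drop the read
 * lock, take the write lock, and revalidate.  The names are hypothetical
 * and the example is compiled out.
 */
#if 0
static struct rwlock example_lock;

static int
example_update(int need_update)
{

	rw_rlock(&example_lock);
	if (!need_update) {
		rw_runlock(&example_lock);
		return (0);
	}
	if (!rw_try_upgrade(&example_lock)) {
		/* Lost the race; reacquire exclusively and revalidate. */
		rw_runlock(&example_lock);
		rw_wlock(&example_lock);
		/* ... state may have changed in the window ... */
	}
	/* ... modify shared state under the write lock ... */
	rw_wunlock(&example_lock);
	return (1);
}
#endif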
1098 */ 1099 ts = turnstile_lookup(&rw->lock_object); 1100 MPASS(ts != NULL); 1101 if (!wwait) 1102 v &= ~RW_LOCK_READ_WAITERS; 1103 atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v); 1104 /* 1105 * Wake other readers if there are no writers pending. Otherwise they 1106 * won't be able to acquire the lock anyway. 1107 */ 1108 if (rwait && !wwait) { 1109 turnstile_broadcast(ts, TS_SHARED_QUEUE); 1110 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 1111 } else 1112 turnstile_disown(ts); 1113 turnstile_chain_unlock(&rw->lock_object); 1114out: 1115 curthread->td_rw_rlocks++; 1116 LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line); 1117 LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw); 1118} 1119 1120#ifdef INVARIANT_SUPPORT 1121#ifndef INVARIANTS 1122#undef __rw_assert 1123#endif 1124 1125/* 1126 * In the non-WITNESS case, rw_assert() can only detect that at least 1127 * *some* thread owns an rlock, but it cannot guarantee that *this* 1128 * thread owns an rlock. 1129 */ 1130void 1131__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line) 1132{ 1133 const struct rwlock *rw; 1134 1135 if (panicstr != NULL) 1136 return; 1137 1138 rw = rwlock2rw(c); 1139 1140 switch (what) { 1141 case RA_LOCKED: 1142 case RA_LOCKED | RA_RECURSED: 1143 case RA_LOCKED | RA_NOTRECURSED: 1144 case RA_RLOCKED: 1145 case RA_RLOCKED | RA_RECURSED: 1146 case RA_RLOCKED | RA_NOTRECURSED: 1147#ifdef WITNESS 1148 witness_assert(&rw->lock_object, what, file, line); 1149#else 1150 /* 1151 * If some other thread has a write lock or we have one 1152 * and are asserting a read lock, fail. Also, if no one 1153 * has a lock at all, fail. 1154 */ 1155 if (rw->rw_lock == RW_UNLOCKED || 1156 (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED || 1157 rw_wowner(rw) != curthread))) 1158 panic("Lock %s not %slocked @ %s:%d\n", 1159 rw->lock_object.lo_name, (what & RA_RLOCKED) ? 1160 "read " : "", file, line); 1161 1162 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { 1163 if (rw_recursed(rw)) { 1164 if (what & RA_NOTRECURSED) 1165 panic("Lock %s recursed @ %s:%d\n", 1166 rw->lock_object.lo_name, file, 1167 line); 1168 } else if (what & RA_RECURSED) 1169 panic("Lock %s not recursed @ %s:%d\n", 1170 rw->lock_object.lo_name, file, line); 1171 } 1172#endif 1173 break; 1174 case RA_WLOCKED: 1175 case RA_WLOCKED | RA_RECURSED: 1176 case RA_WLOCKED | RA_NOTRECURSED: 1177 if (rw_wowner(rw) != curthread) 1178 panic("Lock %s not exclusively locked @ %s:%d\n", 1179 rw->lock_object.lo_name, file, line); 1180 if (rw_recursed(rw)) { 1181 if (what & RA_NOTRECURSED) 1182 panic("Lock %s recursed @ %s:%d\n", 1183 rw->lock_object.lo_name, file, line); 1184 } else if (what & RA_RECURSED) 1185 panic("Lock %s not recursed @ %s:%d\n", 1186 rw->lock_object.lo_name, file, line); 1187 break; 1188 case RA_UNLOCKED: 1189#ifdef WITNESS 1190 witness_assert(&rw->lock_object, what, file, line); 1191#else 1192 /* 1193 * If we hold a write lock fail. We can't reliably check 1194 * to see if we hold a read lock or not. 
1195 */ 1196 if (rw_wowner(rw) == curthread) 1197 panic("Lock %s exclusively locked @ %s:%d\n", 1198 rw->lock_object.lo_name, file, line); 1199#endif 1200 break; 1201 default: 1202 panic("Unknown rw lock assertion: %d @ %s:%d", what, file, 1203 line); 1204 } 1205} 1206#endif /* INVARIANT_SUPPORT */ 1207 1208#ifdef DDB 1209void 1210db_show_rwlock(const struct lock_object *lock) 1211{ 1212 const struct rwlock *rw; 1213 struct thread *td; 1214 1215 rw = (const struct rwlock *)lock; 1216 1217 db_printf(" state: "); 1218 if (rw->rw_lock == RW_UNLOCKED) 1219 db_printf("UNLOCKED\n"); 1220 else if (rw->rw_lock == RW_DESTROYED) { 1221 db_printf("DESTROYED\n"); 1222 return; 1223 } else if (rw->rw_lock & RW_LOCK_READ) 1224 db_printf("RLOCK: %ju locks\n", 1225 (uintmax_t)(RW_READERS(rw->rw_lock))); 1226 else { 1227 td = rw_wowner(rw); 1228 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1229 td->td_tid, td->td_proc->p_pid, td->td_name); 1230 if (rw_recursed(rw)) 1231 db_printf(" recursed: %u\n", rw->rw_recurse); 1232 } 1233 db_printf(" waiters: "); 1234 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { 1235 case RW_LOCK_READ_WAITERS: 1236 db_printf("readers\n"); 1237 break; 1238 case RW_LOCK_WRITE_WAITERS: 1239 db_printf("writers\n"); 1240 break; 1241 case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS: 1242 db_printf("readers and writers\n"); 1243 break; 1244 default: 1245 db_printf("none\n"); 1246 break; 1247 } 1248} 1249 1250#endif 1251