kern_rwlock.c revision 170295
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 170295 2007-06-04 23:51:44Z jeff $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	 (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}
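
/*
 * A minimal sketch of how the KPI implemented here is typically
 * consumed (the rw_*() names are the wrapper macros from sys/rwlock.h;
 * "data_lock" is just an example name):
 *
 *	struct rwlock data_lock;
 *
 *	rw_init(&data_lock, "data lock");
 *
 *	rw_rlock(&data_lock);		(shared, read-only access)
 *	rw_runlock(&data_lock);
 *
 *	rw_wlock(&data_lock);		(exclusive access)
 *	rw_wunlock(&data_lock);
 *
 *	rw_destroy(&data_lock);
 */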

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, 0, file, line);
	lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked, and a reader
	 * then tried to recurse on its read lock after a writer had
	 * blocked, we would end up in a deadlock: the reader would be
	 * blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
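
	/*
	 * Concretely: thread A read locks, thread B then blocks in
	 * rw_wlock(), and A calls rw_rlock() again.  If that second
	 * rw_rlock() were made to wait for B, A would sleep on B while
	 * B sleeps on A's first read lock, and neither could ever run.
	 */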
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				if (RW_READERS(x) == 0)
					lock_profile_obtain_lock_success(
					    &rw->lock_object, contested,
					    waittime, file, line);
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
		    &waittime);

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set, then try to set it.  If we fail to set it,
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
}
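
/*
 * Release a read lock.  The common cases are a single atomic update:
 * drop one reader from the count, or reset RW_READERS_LOCK(1) back to
 * RW_UNLOCKED for the last reader.  The turnstile is only needed when
 * the last reader leaves while writers are waiting.
 */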
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above.)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set, and ours should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);

		/*
		 * Try to drop our lock, leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal(),
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}
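
/*
 * Note that the uncontested write path lives in sys/rwlock.h:
 * _rw_write_lock(), used in the retry loop below, is essentially a
 * single atomic compare-and-set of RW_UNLOCKED to the owning thread
 * pointer.  Everything here handles the case where that fails.
 */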
/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * still other writers waiting.  If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(ts);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, rw);
				break;
			}
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}
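
/*
 * Since a write-locked lock word is just the owner's thread pointer with
 * the flag bits or'd in, the fast path release (a compare-and-set of the
 * bare thread pointer back to RW_UNLOCKED) can only fail when a flag bit
 * is set; that is the invariant asserted on entry below.
 */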
/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_chain_unlock(&rw->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters, if we have any, over writers.  This is probably not
	 * ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting, we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
	 * set, there might not be any actual writers on the turnstile
	 * as they might all be spinning.  In that case, we don't want
	 * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
	 * is going to go away once we wake up all the readers.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef ADAPTIVE_RWLOCKS
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * We have to make sure that we actually have waiters to
	 * wake up.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		turnstile_chain_unlock(&rw->lock_object);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}
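
/*
 * For example, a caller that discovers while reading that it needs to
 * modify the protected data would typically attempt (using the example
 * lock from above):
 *
 *	if (!rw_try_upgrade(&data_lock)) {
 *		rw_runlock(&data_lock);
 *		rw_wlock(&data_lock);
 *		(revalidate state here: the lock was dropped in between)
 *	}
 */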
"read" : "write"); 654 turnstile_broadcast(ts, queue); 655 atomic_store_rel_ptr(&rw->rw_lock, v); 656 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 657 turnstile_chain_unlock(&rw->lock_object); 658} 659 660/* 661 * Attempt to do a non-blocking upgrade from a read lock to a write 662 * lock. This will only succeed if this thread holds a single read 663 * lock. Returns true if the upgrade succeeded and false otherwise. 664 */ 665int 666_rw_try_upgrade(struct rwlock *rw, const char *file, int line) 667{ 668 uintptr_t v, tid; 669 struct turnstile *ts; 670 int success; 671 672 KASSERT(rw->rw_lock != RW_DESTROYED, 673 ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line)); 674 _rw_assert(rw, RA_RLOCKED, file, line); 675 676 /* 677 * Attempt to switch from one reader to a writer. If there 678 * are any write waiters, then we will have to lock the 679 * turnstile first to prevent races with another writer 680 * calling turnstile_wait() before we have claimed this 681 * turnstile. So, do the simple case of no waiters first. 682 */ 683 tid = (uintptr_t)curthread; 684 if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) { 685 success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1), 686 tid); 687 goto out; 688 } 689 690 /* 691 * Ok, we think we have write waiters, so lock the 692 * turnstile. 693 */ 694 ts = turnstile_trywait(&rw->lock_object); 695 696 /* 697 * Try to switch from one reader to a writer again. This time 698 * we honor the current state of the RW_LOCK_WRITE_WAITERS 699 * flag. If we obtain the lock with the flag set, then claim 700 * ownership of the turnstile. In the ADAPTIVE_RWLOCKS case 701 * it is possible for there to not be an associated turnstile 702 * even though there are waiters if all of the waiters are 703 * spinning. 704 */ 705 v = rw->rw_lock & RW_LOCK_WRITE_WAITERS; 706 success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v, 707 tid | v); 708#ifdef ADAPTIVE_RWLOCKS 709 if (success && v && turnstile_lookup(&rw->lock_object) != NULL) 710#else 711 if (success && v) 712#endif 713 turnstile_claim(ts); 714 else 715 turnstile_cancel(ts); 716out: 717 LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); 718 if (success) 719 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 720 file, line); 721 return (success); 722} 723 724/* 725 * Downgrade a write lock into a single read lock. 726 */ 727void 728_rw_downgrade(struct rwlock *rw, const char *file, int line) 729{ 730 struct turnstile *ts; 731 uintptr_t tid, v; 732 733 KASSERT(rw->rw_lock != RW_DESTROYED, 734 ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line)); 735 _rw_assert(rw, RA_WLOCKED, file, line); 736 737 WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); 738 739 /* 740 * Convert from a writer to a single reader. First we handle 741 * the easy case with no waiters. If there are any waiters, we 742 * lock the turnstile, "disown" the lock, and awaken any read 743 * waiters. 744 */ 745 tid = (uintptr_t)curthread; 746 if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1))) 747 goto out; 748 749 /* 750 * Ok, we think we have waiters, so lock the turnstile so we can 751 * read the waiter flags without any races. 752 */ 753 turnstile_chain_lock(&rw->lock_object); 754 v = rw->rw_lock; 755 MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)); 756 757 /* 758 * Downgrade from a write lock while preserving 759 * RW_LOCK_WRITE_WAITERS and give up ownership of the 760 * turnstile. If there are any read waiters, wake them up. 
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
	 * all of the read waiters might be spinning.  In that case,
	 * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
	 * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
	 * writer is blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
#ifdef ADAPTIVE_RWLOCKS
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	else if (ts)
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
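
/*
 * For example, a function whose contract requires the caller to hold the
 * lock exclusively would typically begin with (again using the example
 * lock from above):
 *
 *	rw_assert(&data_lock, RA_WLOCKED);
 */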

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | LA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif