kern_rwlock.c revision 160771
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 160771 2006-07-27 21:45:55Z jhb $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif

struct lock_class lock_class_rw = {
	"rw",
	LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_rwlock
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif
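/*
 * A sketch of the rw_lock word encoding this file relies on (the
 * authoritative definitions are in <sys/rwlock.h>): when RW_LOCK_READ
 * is set, the word holds a count of read locks plus the waiter flags;
 * when it is clear, the word holds the owning thread pointer (extracted
 * by RW_OWNER()) plus the waiter flags.  An unlocked lock is encoded as
 * a read lock with zero readers and no waiters, which is why the read
 * fast path below also covers the unlocked case.
 */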
void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	lock_destroy(&rw->rw_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}
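/*
 * An illustrative (hypothetical) consumer of this code via the public
 * interface in <sys/rwlock.h>, whose rw_rlock() family of macros expands
 * to the _rw_*() functions below ('example_lock' is hypothetical):
 *
 *	struct rwlock example_lock;
 *
 *	rw_init(&example_lock, "example");
 *	rw_rlock(&example_lock);	(shared: many readers may hold it)
 *	rw_runlock(&example_lock);
 *	rw_wlock(&example_lock);	(exclusive: a single writer)
 *	rw_wunlock(&example_lock);
 *	rw_destroy(&example_lock);
 */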
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
#ifdef SMP
	volatile struct thread *owner;
#endif
	uintptr_t x;

	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
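	/*
	 * To make that concrete (a hypothetical interleaving): thread A
	 * read locks rw, thread B blocks waiting for a write lock, and A
	 * then calls rw_rlock() again to recurse.  If new read locks were
	 * refused once B blocked, A would sleep behind B while B waits for
	 * A to drop its original read lock, and neither thread could ever
	 * make progress.
	 */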
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		turnstile_lock(&rw->rw_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set, then try to set it.  If we fail to set it,
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->rw_object, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, 0, file, line);
	curthread->td_locks++;
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->rw_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->rw_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above.)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_lock(&rw->rw_object);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_release(&rw->rw_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->rw_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		break;
	}
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
#ifdef SMP
	volatile struct thread *owner;
#endif
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->rw_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->rw_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers waiting still.  If we fail, restart the
		 * loop.
		 */
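		/*
		 * An illustrative (hypothetical) interleaving of that case:
		 * writer W1 unlocks with both readers and writers queued
		 * and wakes only the readers, leaving
		 * RW_UNLOCKED | RW_LOCK_WRITE_WAITERS behind for the writers
		 * still asleep.  If we observe that value before any woken
		 * reader runs, the cmpset below lets us take the lock
		 * directly, and turnstile_claim() makes us the owner that
		 * the remaining writers are blocked on.
		 */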
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->rw_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, rw);
				break;
			}
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->rw_object, rw_owner(rw),
		    TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_lock(&rw->rw_object);
	ts = turnstile_lookup(&rw->rw_object);

#ifdef SMP
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_release(&rw->rw_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the SMP case, if both flags are set, there might
	 * not be any actual writers on the turnstile as they might all
	 * be spinning.  In that case, we don't want to preserve the
	 * RW_LOCK_WRITE_WAITERS flag as the turnstile is going to go
	 * away once we wakeup all the readers.
	 */
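	/*
	 * A sketch of the outcomes computed below (before the SMP
	 * turnstile_empty() checks, which can clear a flag whose waiters
	 * are all spinning):
	 *
	 *	waiters present		queue woken	value written back
	 *	readers only		shared		RW_UNLOCKED
	 *	readers and writers	shared		RW_UNLOCKED |
	 *						RW_LOCK_WRITE_WAITERS
	 *	writers only		exclusive	RW_UNLOCKED
	 */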
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef SMP
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef SMP
	/*
	 * We have to make sure that we actually have waiters to
	 * wakeup.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	int success;

	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_acq_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1), tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	turnstile_lock(&rw->rw_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.  In the SMP case it is possible
	 * for there to not be an associated turnstile even though there
	 * are waiters if all of the waiters are spinning.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
#ifdef SMP
	if (success && v && turnstile_lookup(&rw->rw_object) != NULL)
#else
	if (success && v)
#endif
		turnstile_claim(&rw->rw_object);
	else
		turnstile_release(&rw->rw_object);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->rw_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->rw_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}
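/*
 * Since the upgrade can fail, a caller typically falls back to dropping
 * the read lock and taking the write lock from scratch, revalidating any
 * state it computed under the read lock.  An illustrative sketch using
 * the public macros from <sys/rwlock.h> ('example_lock' is hypothetical):
 *
 *	if (!rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		(revalidate state here; it may have changed)
 *	}
 */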
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	_rw_assert(rw, RA_WLOCKED, file, line);

	WITNESS_DOWNGRADE(&rw->rw_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_lock(&rw->rw_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For SMP, we have to allow for the fact that all of the
	 * read waiters might be spinning.  In that case, act as if
	 * RW_LOCK_READ_WAITERS is not set.  Also, only preserve
	 * the RW_LOCK_WRITE_WAITERS flag if at least one writer is
	 * blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->rw_object);
#ifdef SMP
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef SMP
	else if (ts == NULL)
		turnstile_release(&rw->rw_object);
#endif
	else
		turnstile_disown(ts);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->rw_object, 0, 0, file, line);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->rw_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->rw_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->rw_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->rw_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->rw_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
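/*
 * An illustrative use of the assertion support above, via the public
 * rw_assert() macro from <sys/rwlock.h> ('example_lock' is hypothetical);
 * a subsystem can verify its locking contract on entry to an internal
 * routine:
 *
 *	rw_assert(&example_lock, RA_WLOCKED);
 */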
#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %jd locks\n",
		    (intmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}
#endif