#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
        .lc_name = "rw",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_rw,
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
        .lc_lock = lock_rw,
        .lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define rw_wowner(rw)                                                   \
        ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
            (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define rw_recursed(rw)         ((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define rw_wlocked(rw)          (rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define rw_owner(rw)            rw_wowner(rw)

#ifndef INVARIANTS
#define _rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

        rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        if (how)
                rw_wlock(rw);
        else
                rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
        if (rw->rw_lock & RW_LOCK_READ) {
                rw_runlock(rw);
                return (0);
        } else {
                rw_wunlock(rw);
                return (1);
        }
}

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
        int flags;

        MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
            RW_RECURSE)) == 0);

        flags = LO_UPGRADABLE | LO_RECURSABLE;
        if (opts & RW_DUPOK)
                flags |= LO_DUPOK;
        if (opts & RW_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & RW_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & RW_QUIET)
                flags |= LO_QUIET;
        flags |= opts & RW_RECURSE;

        rw->rw_lock = RW_UNLOCKED;
        rw->rw_recurse = 0;
        lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

        KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
        KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
        rw->rw_lock = RW_DESTROYED;
        lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
        struct rw_args *args = arg;

        rw_init(args->ra_rw, args->ra_desc);
}

int
rw_wowned(struct rwlock *rw)
{

        return (rw_wowner(rw) == curthread);
}

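/*
 * Illustrative usage sketch (not part of this file's implementation):
 * how a hypothetical consumer might protect a small table with one of
 * these locks.  The names foo_lock, foo_count, foo_init(), foo_get(),
 * foo_set() and foo_fini() are made up for the example; the rwlock
 * calls are the public API backed by the functions in this file.
 *
 *	static struct rwlock foo_lock;
 *	static int foo_count;
 *
 *	static void
 *	foo_init(void)
 *	{
 *
 *		rw_init(&foo_lock, "foo table");
 *	}
 *
 *	static int
 *	foo_get(void)
 *	{
 *		int v;
 *
 *		rw_rlock(&foo_lock);
 *		v = foo_count;
 *		rw_runlock(&foo_lock);
 *		return (v);
 *	}
 *
 *	static void
 *	foo_set(int v)
 *	{
 *
 *		rw_wlock(&foo_lock);
 *		foo_count = v;
 *		rw_wunlock(&foo_lock);
 *	}
 *
 *	static void
 *	foo_fini(void)
 *	{
 *
 *		rw_destroy(&foo_lock);
 *	}
 */
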
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line);
        __rw_wlock(rw, curthread, file, line);
        LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
        WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

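/*
 * A minimal sketch of what the uncontested write path amounts to,
 * assuming the usual layout where an unowned lock word is RW_UNLOCKED
 * and a write-locked word holds the owning thread pointer: a single
 * acquire compare-and-swap, with _rw_wlock_hard() (below) as the slow
 * path.  Illustrative only; the real fast path is the __rw_wlock()
 * macro used above.
 *
 *	static __inline void
 *	example_wlock_fast(struct rwlock *rw, struct thread *td,
 *	    const char *file, int line)
 *	{
 *		uintptr_t tid = (uintptr_t)td;
 *
 *		if (!atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED, tid))
 *			_rw_wlock_hard(rw, tid, file, line);
 *	}
 */
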
void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
            line);
        if (!rw_recursed(rw))
                lock_profile_release_lock(&rw->lock_object);
        __rw_wunlock(rw, curthread, file, line);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * prioritizes writers before readers.
 */
#define RW_CAN_READ(_rw)                                                \
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &      \
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==  \
    RW_LOCK_READ)

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
#endif
        uint64_t waittime = 0;
        int contested = 0;
        uintptr_t v;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
        KASSERT(rw_wowner(rw) != curthread,
            ("%s (%s): wlock already held @ %s:%d", __func__,
            rw->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

        for (;;) {
                /*
                 * Handle the easy case.  If no other thread has a write
                 * lock, then try to bump up the count of read locks.  Note
                 * that we have to preserve the current state of the
                 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
                 * read lock, then rw_lock must have changed, so restart
                 * the loop.  Note that this handles the case of a
                 * completely unlocked rwlock since such a lock is encoded
                 * as a read lock with no waiters.
                 */
                v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        /*
                         * The RW_LOCK_READ_WAITERS flag should only be set
                         * if the lock has been unlocked and write waiters
                         * were present.
                         */
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
                            v + RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            rw, (void *)v,
                                            (void *)(v + RW_ONE_READER));
                                break;
                        }
                        cpu_spinwait();
                        continue;
                }
                lock_profile_obtain_lock_failed(&rw->lock_object,
                    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, rw, owner);
                                while ((struct thread*)RW_OWNER(rw->rw_lock) ==
                                    owner && TD_IS_RUNNING(owner))
                                        cpu_spinwait();
                                continue;
                        }
                }
#endif

                /*
                 * Okay, now it's the hard case.  Some other thread already
                 * has a write lock or there are write waiters present,
                 * acquire the turnstile lock so we can begin the process
                 * of blocking.
                 */
                ts = turnstile_trywait(&rw->lock_object);

                /*
                 * The lock might have been released while we spun, so
                 * recheck its state and restart the loop if needed.
                 */
                v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, quit the hard path and try to spin.
                 */
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                turnstile_cancel(ts);
                                cpu_spinwait();
                                continue;
                        }
                }
#endif

                /*
                 * The lock is held in write mode or it already has waiters.
                 */
                MPASS(!RW_CAN_READ(v));

                /*
                 * If the RW_LOCK_READ_WAITERS flag is already set, then
                 * we can go ahead and block.  If it is not set then try
                 * to set it.  If we fail to set it, drop the turnstile
                 * lock and restart the loop.
                 */
                if (!(v & RW_LOCK_READ_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_READ_WAITERS)) {
                                turnstile_cancel(ts);
                                cpu_spinwait();
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                    __func__, rw);
                }

                /*
                 * We were unable to acquire the lock and the read waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
                turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
        }

        /*
         * TODO: acquire "owner of record" here.  Here be turnstile dragons
         * however.  turnstiles don't like owners changing between calls to
         * turnstile_wait() currently.
         */
        lock_profile_obtain_lock_success(&rw->lock_object, contested,
            waittime, file, line);
        LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rw->lock_object, 0, file, line);
        curthread->td_locks++;
        curthread->td_rw_rlocks++;
}

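/*
 * A minimal sketch of the read fast path above, stripped of the
 * assertions, logging and adaptive spinning: the entire lock state is
 * one word, an unlocked lock is encoded as a read lock with zero
 * readers, and acquiring a read lock is a compare-and-swap that adds
 * RW_ONE_READER while RW_CAN_READ() holds.  Illustrative only; the
 * example_rlock_fast() name is made up.
 *
 *	static __inline int
 *	example_rlock_fast(struct rwlock *rw)
 *	{
 *		uintptr_t v;
 *
 *		v = rw->rw_lock;
 *		return (RW_CAN_READ(v) &&
 *		    atomic_cmpset_acq_ptr(&rw->rw_lock, v, v + RW_ONE_READER));
 *	}
 */
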
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t x, v, queue;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);
        curthread->td_locks--;
        curthread->td_rw_rlocks--;
        WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

        /* TODO: drop "owner of record" here. */

        for (;;) {
                /*
                 * See if there is more than one read lock held.  If so,
                 * just drop one and return.
                 */
                x = rw->rw_lock;
                if (RW_READERS(x) > 1) {
                        if (atomic_cmpset_ptr(&rw->rw_lock, x,
                            x - RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, rw, (void *)x,
                                            (void *)(x - RW_ONE_READER));
                                break;
                        }
                        continue;
                }
                /*
                 * If there aren't any waiters for a write lock, then try
                 * to drop it quickly.
                 */
                if (!(x & RW_LOCK_WAITERS)) {
                        MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
                            RW_READERS_LOCK(1));
                        if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, rw);
                                break;
                        }
                        continue;
                }
                /*
                 * Ok, we know we have waiters and we think we are the
                 * last reader, so grab the turnstile lock.
                 */
                turnstile_chain_lock(&rw->lock_object);
                v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                MPASS(v & RW_LOCK_WAITERS);

                /*
                 * Try to drop our lock leaving the lock in an unlocked
                 * state.
                 *
                 * If you wanted to do explicit lock handoff you'd have to
                 * do it here.  You'd also want to use turnstile_signal()
                 * and you'd have to handle the race where a higher
                 * priority thread blocks on the write lock before the
                 * thread you wakeup actually runs and have the new thread
                 * "steal" the lock.  For now it's a lot simpler to just
                 * wakeup all of the waiters.
                 *
                 * As above, if we fail, then another thread might have
                 * acquired a read lock, so drop the turnstile lock and
                 * restart.
                 */
                x = RW_UNLOCKED;
                if (v & RW_LOCK_WRITE_WAITERS) {
                        queue = TS_EXCLUSIVE_QUEUE;
                        x |= (v & RW_LOCK_READ_WAITERS);
                } else
                        queue = TS_SHARED_QUEUE;
                if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
                    x)) {
                        turnstile_chain_unlock(&rw->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
                            __func__, rw);

                /*
                 * Ok.  The lock is released and all that's left is to
                 * wake up the waiters.  Note that the lock might not be
                 * free anymore, but in that case the writers will just
                 * block again if they run before the new lock holder(s)
                 * release the lock.
                 */
                ts = turnstile_lookup(&rw->lock_object);
                MPASS(ts != NULL);
                turnstile_broadcast(ts, queue);
                turnstile_unpend(ts, TS_SHARED_LOCK);
                turnstile_chain_unlock(&rw->lock_object);
                break;
        }
        lock_profile_release_lock(&rw->lock_object);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
        int spintries = 0;
        int i;
#endif
        uint64_t waittime = 0;
        uintptr_t v, x;
        int contested = 0;

        if (rw_wlocked(rw)) {
                KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
                    __func__, rw->lock_object.lo_name, file, line));
                rw->rw_recurse++;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
                return;
        }

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

        while (!_rw_write_lock(rw, tid)) {
                lock_profile_obtain_lock_failed(&rw->lock_object,
                    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                v = rw->rw_lock;
                owner = (struct thread *)RW_OWNER(v);
                if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
                        while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
                            TD_IS_RUNNING(owner))
                                cpu_spinwait();
                        continue;
                }
                if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < 100) {
                        if (!(v & RW_LOCK_WRITE_SPINNER)) {
                                if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                                    v | RW_LOCK_WRITE_SPINNER)) {
                                        cpu_spinwait();
                                        continue;
                                }
                        }
                        spintries++;
                        for (i = 100000; i > 0; i--) {
                                if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
                                        break;
                                cpu_spinwait();
                        }
                        if (i)
                                continue;
                }
#endif
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, quit the hard path and try to spin.
                 */
                if (!(v & RW_LOCK_READ)) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                turnstile_cancel(ts);
                                cpu_spinwait();
                                continue;
                        }
                }
#endif
                /*
                 * If the lock was released while waiting for the turnstile
                 * chain lock, retry.
                 */
                x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                if ((v & ~x) == RW_UNLOCKED) {
                        x &= ~RW_LOCK_WRITE_SPINNER;
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
                                if (x)
                                        turnstile_claim(ts);
                                else
                                        turnstile_cancel(ts);
                                break;
                        }
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }
                /*
                 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
                 * set it.  If we fail to set it, then loop back and try
                 * again.
                 */
                if (!(v & RW_LOCK_WRITE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_WRITE_WAITERS)) {
                                turnstile_cancel(ts);
                                cpu_spinwait();
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                    __func__, rw);
                }
                /*
                 * We were unable to acquire the lock and the write waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
                turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
                spintries = 0;
#endif
        }
        lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
            file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t v;
        int queue;

        if (rw_wlocked(rw) && rw_recursed(rw)) {
                rw->rw_recurse--;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
                return;
        }
        v = rw->rw_lock;

        KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
            ("%s: neither of the waiter flags are set", __func__));

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

        turnstile_chain_lock(&rw->lock_object);
        ts = turnstile_lookup(&rw->lock_object);

        MPASS(ts != NULL);

        /*
         * Use the same algo as sx locks for now.  Prefer waking up shared
         * waiters if we have any over writers.  This is probably not ideal.
         *
         * 'v' is the value we are going to write back to rw_lock.  If we
         * have waiters on both queues, we need to preserve the state of
         * the waiter flag for the queue we don't wake up.  For now this is
         * hardcoded for the algorithm mentioned above.
         *
         * In the case of both readers and writers waiting we wakeup the
         * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
         * new writer comes in before a reader it will claim the lock up
         * above.  There is probably a potential priority inversion in
         * there that could be worked around either by waking both queues
         * of waiters or doing some complicated lock handoff gymnastics.
         */
        v = RW_UNLOCKED;
        if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
                queue = TS_EXCLUSIVE_QUEUE;
                v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
        } else
                queue = TS_SHARED_QUEUE;

        /* Wake up all waiters for the specific queue. */
        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
                    queue == TS_SHARED_QUEUE ? "read" : "write");
        turnstile_broadcast(ts, queue);
        atomic_store_rel_ptr(&rw->rw_lock, v);
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
        uintptr_t v, x, tid;
        struct turnstile *ts;
        int success;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);

        /*
         * Attempt to switch from one reader to a writer.  If there
         * are any write waiters, then we will have to lock the
         * turnstile first to prevent races with another writer
         * calling turnstile_wait() before we have claimed this
         * turnstile.  So, do the simple case of no waiters first.
         */
        tid = (uintptr_t)curthread;
        success = 0;
        for (;;) {
                v = rw->rw_lock;
                if (RW_READERS(v) > 1)
                        break;
                if (!(v & RW_LOCK_WAITERS)) {
                        success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
                        if (!success)
                                continue;
                        break;
                }

                /*
                 * Ok, we think we have waiters, so lock the turnstile.
                 */
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;
                if (RW_READERS(v) > 1) {
                        turnstile_cancel(ts);
                        break;
                }
                /*
                 * Try to switch from one reader to a writer again.  This time
                 * we honor the current state of the waiters flags.
                 * If we obtain the lock with the flags set, then claim
                 * ownership of the turnstile.
                 */
                x = rw->rw_lock & RW_LOCK_WAITERS;
                success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
                if (success) {
                        if (x)
                                turnstile_claim(ts);
                        else
                                turnstile_cancel(ts);
                        break;
                }
                turnstile_cancel(ts);
        }
        LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
        if (success) {
                curthread->td_rw_rlocks--;
                WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
        }
        return (success);
}

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t tid, v;
        int rwait, wwait;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (rw_recursed(rw))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

        /*
         * Convert from a writer to a single reader.  First we handle
         * the easy case with no waiters.  If there are any waiters, we
         * lock the turnstile and "disown" the lock.
         */
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
                goto out;

        /*
         * Ok, we think we have waiters, so lock the turnstile so we can
         * read the waiter flags without any races.
         */
        turnstile_chain_lock(&rw->lock_object);
        v = rw->rw_lock & RW_LOCK_WAITERS;
        rwait = v & RW_LOCK_READ_WAITERS;
        wwait = v & RW_LOCK_WRITE_WAITERS;
        MPASS(rwait | wwait);

        /*
         * Downgrade from a write lock while preserving waiters flag
         * and give up ownership of the turnstile.
         */
        ts = turnstile_lookup(&rw->lock_object);
        MPASS(ts != NULL);
        if (!wwait)
                v &= ~RW_LOCK_READ_WAITERS;
        atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
        /*
         * Wake other readers if there are no writers pending.  Otherwise they
         * won't be able to acquire the lock anyway.
         */
        if (rwait && !wwait) {
                turnstile_broadcast(ts, TS_SHARED_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        } else
                turnstile_disown(ts);
        turnstile_chain_unlock(&rw->lock_object);
out:
        curthread->td_rw_rlocks++;
        LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If some other thread has a write lock or we have one
                 * and are asserting a read lock, fail.  Also, if no one
                 * has a lock at all, fail.
                 */
                if (rw->rw_lock == RW_UNLOCKED ||
                    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
                    rw_wowner(rw) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rw->lock_object.lo_name, (what == RA_RLOCKED) ?
                            "read " : "", file, line);

                if (!(rw->rw_lock & RW_LOCK_READ)) {
                        if (rw_recursed(rw)) {
                                if (what & RA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            rw->lock_object.lo_name, file,
                                            line);
                        } else if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                }
#endif
                break;
        case RA_WLOCKED:
        case RA_WLOCKED | RA_RECURSED:
        case RA_WLOCKED | RA_NOTRECURSED:
                if (rw_wowner(rw) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                if (rw_recursed(rw)) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If we hold a write lock, fail.  We can't reliably check
                 * to see if we hold a read lock or not.
                 */
                if (rw_wowner(rw) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
        struct rwlock *rw;
        struct thread *td;

        rw = (struct rwlock *)lock;

        db_printf(" state: ");
        if (rw->rw_lock == RW_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (rw->rw_lock == RW_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (rw->rw_lock & RW_LOCK_READ)
                db_printf("RLOCK: %ju locks\n",
                    (uintmax_t)(RW_READERS(rw->rw_lock)));
        else {
                td = rw_wowner(rw);
                db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (rw_recursed(rw))
                        db_printf(" recursed: %u\n", rw->rw_recurse);
        }
        db_printf(" waiters: ");
        switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
        case RW_LOCK_READ_WAITERS:
                db_printf("readers\n");
                break;
        case RW_LOCK_WRITE_WAITERS:
                db_printf("writers\n");
                break;
        case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
                db_printf("readers and writers\n");
                break;
        default:
                db_printf("none\n");
                break;
        }
}

#endif
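
/*
 * Illustrative consumer pattern for the upgrade/downgrade primitives
 * above (not part of this file): look up under a read lock and, only
 * when a modification is needed, try a non-blocking upgrade; if that
 * fails, fall back to dropping the read lock and taking the write
 * lock.  The names foo_lock, foo_find(), foo_update() and
 * foo_needs_update() are made up for the example.
 *
 *	rw_rlock(&foo_lock);
 *	if (foo_needs_update()) {
 *		if (!rw_try_upgrade(&foo_lock)) {
 *			rw_runlock(&foo_lock);
 *			rw_wlock(&foo_lock);
 *			(state may have changed while the lock was
 *			 dropped; revalidate before modifying)
 *		}
 *		foo_update();
 *		rw_downgrade(&foo_lock);
 *	}
 *	foo_find();
 *	rw_runlock(&foo_lock);
 */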