Lines Matching refs:rw_lock

64  * This functionality assumes that struct rwlock* have a member named rw_lock.
66 #define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock))
179 if (rw->rw_lock & RW_LOCK_READ) {
193 uintptr_t x = rw->rw_lock;
211 ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
212 ("%s: rw_lock not aligned for %s: %p", __func__, name,
213 &rw->rw_lock));
230 rw->rw_lock = RW_UNLOCKED;
241 KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
243 rw->rw_lock = RW_DESTROYED;
276 KASSERT(rw->rw_lock != RW_DESTROYED,
309 KASSERT(rw->rw_lock != RW_DESTROYED,
316 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
322 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
357 KASSERT(rw->rw_lock != RW_DESTROYED,
402 * read lock, then rw_lock must have changed, so restart
408 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
581 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
649 KASSERT(rw->rw_lock != RW_DESTROYED,
690 x = rw->rw_lock;
692 KASSERT(rw->rw_lock != RW_DESTROYED,
696 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
731 if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
750 if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
814 if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv))
845 KASSERT(rw->rw_lock != RW_DESTROYED,
847 __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
942 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
950 rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
998 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
1057 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | x)) {
1072 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
1149 atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
1159 KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
1171 * 'v' is the value we are going to write back to rw_lock. If we
1190 atomic_store_rel_ptr(&rw->rw_lock, setv);
1219 KASSERT(rw->rw_lock != RW_DESTROYED,
1221 __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
1237 success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid);
1260 success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv);
1302 KASSERT(rw->rw_lock != RW_DESTROYED,
1304 __rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
1318 if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
1326 v = rw->rw_lock & RW_LOCK_WAITERS;
1339 atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
1400 if (rw->rw_lock == RW_UNLOCKED ||
1401 (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
1407 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
1463 if (rw->rw_lock == RW_UNLOCKED)
1465 else if (rw->rw_lock == RW_DESTROYED) {
1468 } else if (rw->rw_lock & RW_LOCK_READ)
1470 (uintmax_t)(RW_READERS(rw->rw_lock)));
1479 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
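
The rwlock2rw() macro at line 66 relies on the container-of pattern: given the address of the rw_lock word (the lock cookie), it recovers the enclosing struct rwlock. A minimal sketch of the same pattern, using simplified stand-ins for the kernel types and plain offsetof() arithmetic in place of FreeBSD's __containerof():

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-ins: the real struct rwlock embeds a struct lock_object
     * followed by the rw_lock word that the listing above keys on. */
    struct lock_object {
            const char *lo_name;
    };
    struct rwlock {
            struct lock_object lock_object;
            volatile uintptr_t rw_lock;
    };

    /* Recover the enclosing struct rwlock from a pointer to its rw_lock member. */
    #define rwlock2rw(c) \
            ((struct rwlock *)((char *)(c) - offsetof(struct rwlock, rw_lock)))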
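
The read-lock fast path at lines 690-696 bumps the reader count stored in rw_lock with a compare-and-set loop; atomic_fcmpset_acq_ptr() reloads the expected value on failure, so the loop simply retries with the updated snapshot. A sketch of that loop in portable C11 atomics follows; the flag values are hypothetical placeholders for the kernel's RW_LOCK_* constants, and the waiter/spinner handling in the real kernel code is omitted:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RW_LOCK_READ            0x01UL  /* unlocked or read-locked (placeholder value) */
    #define RW_LOCK_WRITE_WAITERS   0x04UL  /* a writer is queued (placeholder value) */
    #define RW_ONE_READER           0x10UL  /* increment for one reader (placeholder value) */

    /* Try to take a read lock: keep retrying a weak compare-and-swap that adds
     * one reader while no writer owns the lock and no writer is waiting. */
    static bool
    rw_try_rlock_fast(_Atomic uintptr_t *rw_lock)
    {
            uintptr_t x = atomic_load_explicit(rw_lock, memory_order_relaxed);

            while ((x & RW_LOCK_READ) != 0 && (x & RW_LOCK_WRITE_WAITERS) == 0) {
                    if (atomic_compare_exchange_weak_explicit(rw_lock, &x,
                        x + RW_ONE_READER, memory_order_acquire,
                        memory_order_relaxed))
                            return (true);
                    /* The failed CAS reloaded x; retry against the new value. */
            }
            return (false);
    }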