Lines matching references to lock in the Linux kernel's queued spinlock implementation (kernel/locking/qspinlock.c)

25 #include <trace/events/lock.h>
35 * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
41 * This queued spinlock implementation is based on the MCS lock, however to
45 * In particular; where the traditional MCS lock consists of a tail pointer
54 * number. With one byte for the lock value and 3 bytes for the tail, only a
55 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
59 * We also change the first spinner to spin on the lock bit instead of its
60 * node; whereby avoiding the need to carry a node from lock to unlock, and
61 * preserving existing lock API. This also makes the unlock code simpler and
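
The design notes above compress an MCS-style lock into a single 32-bit word: one byte for the lock value, a pending bit, and an encoded queue tail, with the first spinner spinning on the lock bit itself. Below is a minimal userspace C sketch of that word layout, assuming the offsets used when NR_CPUS < 16K (8-bit lock byte, 8-bit pending byte, 2-bit node index, 14-bit CPU number plus one); the Q_* names and the main() demo are illustrative, not the kernel's qspinlock_types.h definitions.

/*
 * Illustrative layout of the 32-bit queued-spinlock word described above.
 * Userspace model, not the kernel header.
 */
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_OFFSET    0
#define Q_LOCKED_MASK      0x000000ffu   /* byte 0: lock value              */
#define Q_PENDING_OFFSET   8
#define Q_PENDING_MASK     0x0000ff00u   /* byte 1: pending (only bit 8 used) */
#define Q_TAIL_IDX_OFFSET  16
#define Q_TAIL_IDX_MASK    0x00030000u   /* 2 bits: per-CPU node index      */
#define Q_TAIL_CPU_OFFSET  18
#define Q_TAIL_CPU_MASK    0xfffc0000u   /* 14 bits: CPU number + 1         */

#define Q_LOCKED_VAL   (1u << Q_LOCKED_OFFSET)
#define Q_PENDING_VAL  (1u << Q_PENDING_OFFSET)

int main(void)
{
	/* CPU 3, node index 1, lock held, pending set. */
	uint32_t val = ((3u + 1) << Q_TAIL_CPU_OFFSET) |
		       (1u << Q_TAIL_IDX_OFFSET) |
		       Q_PENDING_VAL | Q_LOCKED_VAL;

	printf("tail cpu=%u idx=%u pending=%u locked=%u\n",
	       ((val & Q_TAIL_CPU_MASK) >> Q_TAIL_CPU_OFFSET) - 1,
	       (val & Q_TAIL_IDX_MASK) >> Q_TAIL_IDX_OFFSET,
	       (val & Q_PENDING_MASK) >> Q_PENDING_OFFSET,
	       val & Q_LOCKED_MASK);
	return 0;
}
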
92 * made by atomic_cond_read_relaxed when waiting for the lock to
145 * @lock: Pointer to queued spinlock structure
149 static __always_inline void clear_pending(struct qspinlock *lock)
151 WRITE_ONCE(lock->pending, 0);
156 * @lock: Pointer to queued spinlock structure
162 static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
164 WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
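
The clear_pending() and clear_pending_set_locked() variants above rely on the pending field occupying a whole byte, so they can be plain byte and halfword stores instead of read-modify-write atomics. The sketch below models that with a union over the 32-bit word; the field names mirror struct qspinlock, but this is an assumption-laden userspace illustration (little-endian layout, ordinary non-atomic stores, no WRITE_ONCE).

/*
 * Union view that makes a byte-wide clear_pending() and a halfword-wide
 * clear_pending_set_locked() possible. Plain C model for a little-endian
 * build; the kernel relies on single-copy-atomic stores here.
 */
#include <stdint.h>
#include <stdio.h>

struct qspinlock_model {
	union {
		uint32_t val;                    /* whole 32-bit lock word      */
		struct {
			uint8_t  locked;         /* bits  0-7  : lock byte      */
			uint8_t  pending;        /* bits  8-15 : pending byte   */
			uint16_t tail;           /* bits 16-31 : encoded tail   */
		};
		struct {
			uint16_t locked_pending; /* bits 0-15: locked + pending */
			uint16_t tail2;
		};
	};
};

int main(void)
{
	struct qspinlock_model lock = { .val = 0x00040101 }; /* tail, pending, locked */

	lock.pending = 0;                 /* clear_pending(): -> 0x00040001       */
	printf("after clear_pending:            0x%08x\n", lock.val);

	lock.val = 0x00000100;            /* pending set, lock free               */
	lock.locked_pending = 1;          /* clear_pending_set_locked(): 0,1,0 -> 0,0,1 */
	printf("after clear_pending_set_locked: 0x%08x\n", lock.val);
	return 0;
}
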
169 * @lock : Pointer to queued spinlock structure
173 * xchg(lock, tail), which heads an address dependency
175 * p,*,* -> n,*,* ; prev = xchg(lock, node)
177 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
183 return (u32)xchg_relaxed(&lock->tail,
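
The xchg_tail() variant above swaps only the upper 16 bits of the lock word, so the whole "queue tail code word" is replaced with one relaxed exchange. Below is a hedged sketch of that idea with encode_tail()/decode_tail() helpers loosely following the kernel's (CPU number plus one, plus a 2-bit per-CPU node index); the model_ names and the bit positions within the halfword are illustrative.

/* Userspace C11 model of the tail code word and its relaxed exchange. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define Q_TAIL_IDX_BITS  2                          /* 4 MCS nodes per CPU */
#define Q_TAIL_IDX_MASK  ((1u << Q_TAIL_IDX_BITS) - 1)

static uint16_t encode_tail(unsigned int cpu, unsigned int idx)
{
	/* cpu + 1 so that tail == 0 still means "queue empty". */
	return (uint16_t)(((cpu + 1) << Q_TAIL_IDX_BITS) | idx);
}

static void decode_tail(uint16_t tail, unsigned int *cpu, unsigned int *idx)
{
	*cpu = (tail >> Q_TAIL_IDX_BITS) - 1;
	*idx = tail & Q_TAIL_IDX_MASK;
}

/* Upper 16 bits of the 32-bit lock word, modelled as their own atomic. */
static _Atomic uint16_t lock_tail;

static uint16_t model_xchg_tail(uint16_t new_tail)
{
	/* Relaxed is enough in this model: the kernel's comment above notes
	 * that the exchange heads an address dependency, so ordering against
	 * the node initialisation is provided elsewhere. */
	return atomic_exchange_explicit(&lock_tail, new_tail,
					memory_order_relaxed);
}

int main(void)
{
	unsigned int cpu, idx;
	uint16_t prev = model_xchg_tail(encode_tail(2, 0));

	printf("previous tail code word: %u (0 == empty queue)\n", (unsigned)prev);

	prev = model_xchg_tail(encode_tail(5, 1));
	decode_tail(prev, &cpu, &idx);
	printf("previous waiter: cpu=%u idx=%u\n", cpu, idx);
	return 0;
}
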
191 * @lock: Pointer to queued spinlock structure
195 static __always_inline void clear_pending(struct qspinlock *lock)
197 atomic_andnot(_Q_PENDING_VAL, &lock->val);
202 * @lock: Pointer to queued spinlock structure
206 static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
208 atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
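
The generic clear_pending_set_locked() above performs the *,1,0 -> *,0,1 transition with a single atomic add of (-_Q_PENDING_VAL + _Q_LOCKED_VAL): because pending is known to be set and the lock byte is known to be clear, the add cannot carry or borrow into the tail bits. A small userspace model of that arithmetic follows; the constants assume locked at bit 0 and pending at bit 8, and the model_ helpers are illustrative.

/* Userspace C11 model of the single-RMW pending->locked hand-over. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_VAL    0x00000001u
#define Q_PENDING_VAL   0x00000100u
#define Q_PENDING_MASK  0x00000100u

static _Atomic uint32_t lock_val;

static void model_clear_pending(void)
{
	/* *,1,* -> *,0,*  (the kernel uses atomic_andnot for this) */
	atomic_fetch_and_explicit(&lock_val, ~Q_PENDING_MASK,
				  memory_order_relaxed);
}

static void model_clear_pending_set_locked(void)
{
	/* *,1,0 -> *,0,1 : valid only because pending is known set and the
	 * lock byte known clear, so the add cannot disturb the tail bits. */
	atomic_fetch_add_explicit(&lock_val,
				  (uint32_t)(-Q_PENDING_VAL + Q_LOCKED_VAL),
				  memory_order_relaxed);
}

int main(void)
{
	atomic_store(&lock_val, 0x00040100u);       /* tail set, pending, unlocked */
	model_clear_pending_set_locked();
	printf("0x%08x\n", atomic_load(&lock_val)); /* 0x00040001 */

	atomic_store(&lock_val, 0x00000100u);
	model_clear_pending();
	printf("0x%08x\n", atomic_load(&lock_val)); /* 0x00000000 */
	return 0;
}
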
213 * @lock : Pointer to queued spinlock structure
217 * xchg(lock, tail)
219 * p,*,* -> n,*,* ; prev = xchg(lock, node)
221 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
223 u32 old, new, val = atomic_read(&lock->val);
232 old = atomic_cmpxchg_relaxed(&lock->val, val, new);
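
The fragments above come from the cmpxchg-based fallback for xchg_tail(): rebuild the word with the new tail spliced in and retry until no other CPU changed it in between. A hedged userspace sketch of that loop; Q_TAIL_MASK and the model_ helper are illustrative stand-ins.

/* Userspace C11 model of the compare-and-swap tail exchange. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define Q_TAIL_MASK 0xffff0000u

static _Atomic uint32_t lock_val;

static uint32_t model_xchg_tail(uint32_t tail)
{
	uint32_t val = atomic_load_explicit(&lock_val, memory_order_relaxed);
	uint32_t new;

	do {
		/* Keep locked + pending, replace only the tail bits. */
		new = (val & ~Q_TAIL_MASK) | (tail & Q_TAIL_MASK);
		/* On failure, val is reloaded with the current word. */
	} while (!atomic_compare_exchange_weak_explicit(&lock_val, &val, new,
							memory_order_relaxed,
							memory_order_relaxed));

	return val & Q_TAIL_MASK;          /* previous tail code word */
}

int main(void)
{
	atomic_store(&lock_val, 0x00000001u);            /* locked, empty queue */
	printf("prev tail 0x%08x\n", model_xchg_tail(0x00040000u));
	printf("prev tail 0x%08x\n", model_xchg_tail(0x00080000u));
	printf("lock word 0x%08x\n", atomic_load(&lock_val));
	return 0;
}
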
243 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
244 * @lock : Pointer to queued spinlock structure
245 * Return: The previous lock value
250 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
252 return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
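
queued_fetch_set_pending_acquire() above sets the pending bit and returns the entire previous lock word in one acquire-ordered fetch-or, which is what lets the slowpath decide between waiting as the pending CPU and falling back to the queue. A userspace sketch of the primitive and the decision it enables; the constants and the model_ name are illustrative.

/* Userspace C11 model of fetch-or-acquire on the pending bit. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_MASK  0x000000ffu
#define Q_PENDING_VAL  0x00000100u

static _Atomic uint32_t lock_val;

static uint32_t model_fetch_set_pending_acquire(void)
{
	return atomic_fetch_or_explicit(&lock_val, Q_PENDING_VAL,
					memory_order_acquire);
}

int main(void)
{
	atomic_store(&lock_val, 0x00000001u);    /* owner holds the lock */

	uint32_t old = model_fetch_set_pending_acquire();

	if (old & ~Q_LOCKED_MASK)
		printf("pending or tail already set: back off and queue\n");
	else
		printf("we own the pending bit; wait for the owner to leave\n");

	printf("old=0x%08x now=0x%08x\n", old, atomic_load(&lock_val));
	return 0;
}
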
257 * set_locked - Set the lock bit and own the lock
258 * @lock: Pointer to queued spinlock structure
262 static __always_inline void set_locked(struct qspinlock *lock)
264 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
276 static __always_inline void __pv_kick_node(struct qspinlock *lock,
278 static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
297 * @lock: Pointer to queued spinlock structure
300 * (queue tail, pending bit, lock value)
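
The slowpath comments annotate every transition as a (queue tail, pending bit, lock value) triple. As a reading aid, here is a small illustrative decoder from a 32-bit lock word into that triple, using the same assumed layout as the earlier sketches.

/* Decode a lock word into the (tail, pending, locked) triple notation. */
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_MASK   0x000000ffu
#define Q_PENDING_MASK  0x0000ff00u
#define Q_TAIL_MASK     0xffff0000u
#define Q_TAIL_OFFSET   16

static void print_triple(uint32_t val)
{
	printf("(%u,%u,%u)\n",
	       (val & Q_TAIL_MASK) >> Q_TAIL_OFFSET,      /* queue tail  */
	       (val & Q_PENDING_MASK) ? 1u : 0u,          /* pending bit */
	       val & Q_LOCKED_MASK);                      /* lock value  */
}

int main(void)
{
	print_triple(0x00000001u);  /* (0,0,1): uncontended, lock held     */
	print_triple(0x00000101u);  /* (0,1,1): one pending waiter         */
	print_triple(0x00040101u);  /* (n,1,1): queued waiters behind them */
	return 0;
}
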
316 void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
327 if (virt_spin_lock(lock))
338 val = atomic_cond_read_relaxed(&lock->val,
353 val = queued_fetch_set_pending_acquire(lock);
366 clear_pending(lock);
377 * store-release that clears the locked bit and create lock
383 smp_cond_load_acquire(&lock->locked, !VAL);
390 clear_pending_set_locked(lock);
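
The fragments above are the pending-bit path: set pending with acquire semantics, undo it and queue if any other waiter is visible, otherwise wait for the owner to release the lock byte and perform the 0,1,0 -> 0,0,1 hand-over. A userspace sketch of that control flow; the bounded wait for a transient all-pending value is omitted, the model spins on the whole word rather than the lock byte, and all model_ helpers and constants are illustrative.

/* Userspace C11 model of the pending-bit acquisition path. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_MASK   0x000000ffu
#define Q_PENDING_MASK  0x0000ff00u
#define Q_LOCKED_VAL    0x00000001u
#define Q_PENDING_VAL   0x00000100u

static _Atomic uint32_t lock_val;

/* Returns true if the lock was taken via the pending bit, false if the
 * caller must fall back to the MCS queue (sketched further below). */
static bool model_pending_path(void)
{
	/* 0,0,* -> 0,1,* : claim the pending bit with acquire ordering. */
	uint32_t val = atomic_fetch_or_explicit(&lock_val, Q_PENDING_VAL,
						memory_order_acquire);

	/* Someone else is pending or queued: undo our pending bit only if we
	 * were the one who set it, then go queue. */
	if (val & ~Q_LOCKED_MASK) {
		if (!(val & Q_PENDING_MASK))
			atomic_fetch_and_explicit(&lock_val, ~Q_PENDING_VAL,
						  memory_order_relaxed);
		return false;
	}

	/* 0,1,1 -> *,1,0 : wait for the owner to drop the lock byte. The
	 * acquire load pairs with the owner's release store on unlock. */
	while (atomic_load_explicit(&lock_val, memory_order_acquire) &
	       Q_LOCKED_MASK)
		;

	/* 0,1,0 -> 0,0,1 : clear pending, set locked in one add. */
	atomic_fetch_add_explicit(&lock_val,
				  (uint32_t)(-Q_PENDING_VAL + Q_LOCKED_VAL),
				  memory_order_relaxed);
	return true;
}

int main(void)
{
	atomic_store(&lock_val, 0);                /* free lock */
	printf("took lock via pending: %d\n", model_pending_path());
	printf("lock word: 0x%08x\n", atomic_load(&lock_val));
	return 0;
}
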
405 trace_contention_begin(lock, LCB_F_SPIN);
412 * we fall back to spinning on the lock directly without using
418 while (!queued_spin_trylock(lock))
446 if (queued_spin_trylock(lock))
463 old = xchg_tail(lock, tail);
480 * While waiting for the MCS lock, the next pointer may have
481 * been set by another lock waiter. We optimistically load
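
The fragments above are the queueing step: after tracing contention, take a per-CPU MCS node (falling back to plain trylock spinning if the node indices are exhausted), try the lock once more, publish the node as the new tail with xchg_tail(), and, if there was a previous tail, link in behind that waiter and spin on our own node. Below is a userspace sketch of the publish-and-link part only; the trylock attempts, the prefetch of the next pointer and the paravirt hooks are left out, and the node pool, encode/decode helpers and orderings are illustrative choices (a release publish stands in for the kernel's pre-publish barrier).

/* Userspace C11 model of MCS queueing: publish tail, link behind prev. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	_Atomic int locked;           /* set by our predecessor on hand-over */
};

#define NCPU 4
static struct mcs_node nodes[NCPU];   /* one queue node per model "CPU" */

/* Upper half of the lock word, modelled as its own atomic halfword. */
static _Atomic uint16_t lock_tail;

/* cpu + 1 so that 0 still means "queue empty"; node index omitted here. */
static uint16_t encode_tail(unsigned int cpu)   { return (uint16_t)(cpu + 1); }
static struct mcs_node *decode_tail(uint16_t t) { return &nodes[t - 1]; }

static void model_queue(unsigned int cpu)
{
	struct mcs_node *node = &nodes[cpu];

	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

	/* Publish ourselves as the new tail; release orders the node init. */
	uint16_t old = atomic_exchange_explicit(&lock_tail, encode_tail(cpu),
						memory_order_release);

	if (old) {
		/* There was a previous waiter: link in behind it ... */
		struct mcs_node *prev = decode_tail(old);

		atomic_store_explicit(&prev->next, node, memory_order_release);

		/* ... and spin on our own node until it hands us the head. */
		while (!atomic_load_explicit(&node->locked,
					     memory_order_acquire))
			;
	}
	/* We are now the head of the queue; claiming the lock is sketched
	 * after the remaining fragments below. */
}

int main(void)
{
	model_queue(0);               /* empty queue: publish, no spinning */
	printf("tail code word: %u\n", (unsigned)atomic_load(&lock_tail));
	return 0;
}
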
497 * store-release that clears the locked bit and create lock
502 * the lock and return a non-zero value. So we have to skip the
511 if ((val = pv_wait_head_or_lock(lock, node)))
514 val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
518 * claim the lock:
520 * n,0,0 -> 0,0,1 : lock, uncontended
521 * *,*,0 -> *,*,1 : lock, contended
523 * If the queue head is the only one in the queue (lock value == tail)
524 * and nobody is pending, clear the tail code and grab the lock.
525 * Otherwise, we only need to grab the lock.
530 * of lock stealing; therefore we must also allow:
539 if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
548 set_locked(lock);
557 pv_kick_node(lock, next);
560 trace_contention_end(lock, 0);
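
The final fragments are the head-of-queue logic: wait until both pending and locked are clear, attempt the uncontended n,0,0 -> 0,0,1 claim that also erases our own tail, otherwise just set the lock byte, wait for the successor's next pointer and hand the queue head over, ending with the contention tracepoint. A userspace sketch of that sequence follows; pv_wait_head_or_lock(), pv_kick_node() and tracing are omitted, and the constants, node layout and model_ helper are illustrative.

/* Userspace C11 model of the head-of-queue wait, claim and hand-over. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define Q_LOCKED_VAL           0x00000001u
#define Q_LOCKED_PENDING_MASK  0x0000ffffu
#define Q_TAIL_MASK            0xffff0000u

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	_Atomic int locked;
};

static _Atomic uint32_t lock_val;

static void model_lock_as_queue_head(struct mcs_node *node, uint32_t tail)
{
	uint32_t val;

	/* *,x,y -> *,0,0 : wait until neither pending nor locked is set.
	 * Acquire pairs with the previous owner's release on unlock. */
	do {
		val = atomic_load_explicit(&lock_val, memory_order_acquire);
	} while (val & Q_LOCKED_PENDING_MASK);

	/* n,0,0 -> 0,0,1 : nobody queued behind us and nobody pending, so
	 * clear the tail and take the lock in a single step. */
	if ((val & Q_TAIL_MASK) == tail &&
	    atomic_compare_exchange_strong_explicit(&lock_val, &val,
						    Q_LOCKED_VAL,
						    memory_order_relaxed,
						    memory_order_relaxed))
		return;                        /* no contention behind us */

	/* *,*,0 -> *,*,1 : someone is (or will be) queued behind us; set the
	 * locked bit and leave the tail for them. */
	atomic_fetch_or_explicit(&lock_val, Q_LOCKED_VAL,
				 memory_order_relaxed);

	/* Wait until the successor has linked itself in, then hand over the
	 * queue head by setting its node's locked flag. */
	struct mcs_node *next;
	while (!(next = atomic_load_explicit(&node->next,
					     memory_order_acquire)))
		;
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}

int main(void)
{
	static struct mcs_node me;      /* zero-initialised queue node */

	/* Pretend we already published tail 0x00010000, reached the head of
	 * an otherwise empty queue, and the old owner then unlocked. */
	atomic_store(&lock_val, 0x00010000u | Q_LOCKED_VAL);
	atomic_fetch_and_explicit(&lock_val, ~Q_LOCKED_VAL,
				  memory_order_release);

	model_lock_as_queue_head(&me, 0x00010000u);
	printf("lock word: 0x%08x\n", atomic_load(&lock_val)); /* 0x00000001 */
	return 0;
}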