Lines matching refs: lock (s390 spinlock implementation, arch/s390/lib/spinlock.c). Each entry below is prefixed with its line number in the source file; lines that do not mention "lock" are omitted, so comments and loops appear with gaps.

73 static inline int arch_load_niai4(int *lock)
80 : "=d" (owner) : "Q" (*lock) : "memory");
84 static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
91 : "=d" (old), "=Q" (*lock)
92 : "0" (old), "d" (new), "Q" (*lock)
97 static inline struct spin_wait *arch_spin_decode_tail(int lock)
101 ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
102 cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
106 static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
108 if (lock & _Q_LOCK_CPU_MASK)
109 return lock & _Q_LOCK_CPU_MASK;
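
arch_spin_decode_tail() and arch_spin_yield_target() both pick fields out of the packed lock word: the owner CPU (plus one) in the low bits, and the queue tail (a per-CPU wait node plus a nesting index) in the high bits. The following self-contained sketch shows that decoding; the mask and offset constants and the 64-CPU/4-level array sizes are illustrative assumptions, not guaranteed to match any particular kernel version:

#define Q_LOCK_CPU_MASK   0x0000ffff    /* owner CPU + 1 */
#define Q_TAIL_IDX_OFFSET 18
#define Q_TAIL_IDX_MASK   0x000c0000    /* waiter nesting level */
#define Q_TAIL_CPU_OFFSET 20
#define Q_TAIL_CPU_MASK   0xfff00000    /* last queued CPU + 1 */

struct spin_wait {
        struct spin_wait *next;
        int lock;
};

/* One wait node per CPU and nesting level, standing in for the
 * kernel's per-CPU spin_wait array. */
static struct spin_wait spin_wait_nodes[64][4];

static struct spin_wait *decode_tail(int lock)
{
        int ix  = (lock & Q_TAIL_IDX_MASK) >> Q_TAIL_IDX_OFFSET;
        int cpu = (lock & Q_TAIL_CPU_MASK) >> Q_TAIL_CPU_OFFSET;

        return &spin_wait_nodes[cpu - 1][ix];   /* CPU numbers are 1-based */
}

/* Yield target: the owner CPU encoded in the lock word, or 0. */
static int yield_target(int lock)
{
        return lock & Q_LOCK_CPU_MASK;
}

CPU numbers are stored off by one so that CPU 0 remains distinguishable from "no owner", which is why the decode subtracts one before indexing.
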
131 old = READ_ONCE(lp->lock);
135 * The lock is free but there may be waiters.
136 * With no waiters simply take the lock, if there
137 * are waiters try to steal the lock. The lock may
139 * waiter will get the lock.
142 if (__atomic_cmpxchg_bool(&lp->lock, old, new))
143 /* Got the lock */
145 /* lock passing in progress */
150 if (__atomic_cmpxchg_bool(&lp->lock, old, new))
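
These fragments from the queued slow path implement the take-or-steal policy the comment describes: a free lock with no waiters is taken outright, while a free lock with queued waiters may be stolen at most a few times before the head waiter is guaranteed to get it. A hedged sketch of that entry step, reusing the masks from the previous sketch (the steal-counter constants are assumptions about the exact bit layout):

#define Q_LOCK_STEAL_OFFSET 16
#define Q_LOCK_STEAL_MASK   0x00030000
#define Q_TAIL_MASK         (Q_TAIL_IDX_MASK | Q_TAIL_CPU_MASK)

static bool try_take_or_steal(_Atomic int *lock, int cpu_nr)
{
        int old = atomic_load_explicit(lock, memory_order_relaxed);

        if (old & Q_LOCK_CPU_MASK)
                return false;           /* held: caller must queue up */
        if (!(old & Q_TAIL_MASK))
                /* Free and no waiters: take the lock outright. */
                return atomic_compare_exchange_strong(lock, &old, cpu_nr);
        if ((old & Q_LOCK_STEAL_MASK) == Q_LOCK_STEAL_MASK)
                return false;           /* steal budget exhausted: queue up */
        /* Steal ahead of the queued waiters, bumping the steal count. */
        int new = (old + (1 << Q_LOCK_STEAL_OFFSET)) | cpu_nr;
        return atomic_compare_exchange_strong(lock, &old, new);
}

A failed cmpxchg here typically means lock passing to a queued waiter is in progress (the "/* lock passing in progress */" fragment above), which is why the real loop retries for a while instead of queueing immediately.
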
160 /* Pass the virtual CPU to the lock holder if it is not running */
172 /* Query running state of lock holder again. */
179 /* Spin on the lock value in the spinlock_t */
182 old = READ_ONCE(lp->lock);
187 if (__atomic_cmpxchg_bool(&lp->lock, old, new))
188 /* Got the lock */
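
Once queued, the waiter first donates its time slice to a lock holder whose virtual CPU is not running, then, as head of the queue, spins on the lock value in the spinlock_t itself until the owner bits clear. A sketch of that final step; vcpu_is_preempted() and yield_to() are hypothetical stand-ins for the hypervisor hooks, and the clean-up of the waiter's own tail entry is elided:

extern bool vcpu_is_preempted(int cpu); /* hypothetical hypervisor hook */
extern void yield_to(int cpu);          /* hypothetical hypervisor hook */

static void head_waiter_acquire(_Atomic int *lock, int cpu_nr)
{
        for (;;) {
                int old = atomic_load_explicit(lock, memory_order_relaxed);
                int owner = old & Q_LOCK_CPU_MASK;

                if (owner) {
                        /* Pass our vCPU to a holder that is not running. */
                        if (vcpu_is_preempted(owner - 1))
                                yield_to(owner - 1);
                        continue;
                }
                /* Free: claim the lock, keeping the tail and steal bits. */
                int new = (old & ~Q_LOCK_CPU_MASK) | cpu_nr;
                if (atomic_compare_exchange_strong(lock, &old, new))
                        return;         /* got the lock */
        }
}
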
217 /* Pass the virtual CPU to the lock holder if it is not running */
218 owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
224 old = arch_load_niai4(&lp->lock);
226 /* Try to get the lock if it is free. */
229 if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
230 /* Got the lock */
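
The fragments above are from arch_spin_lock_classic(), the non-queued fallback: one directed yield to a preempted holder, then a spin loop built from the two NIAI helpers. A sketch combining the pieces defined earlier (the real loop also re-yields after a tunable number of iterations, which is elided here):

static void classic_acquire(_Atomic int *lock, int cpu_nr)
{
        int owner = yield_target(atomic_load_explicit(lock,
                                                      memory_order_relaxed));

        /* Pass the virtual CPU to the lock holder if it is not running. */
        if (owner && vcpu_is_preempted(owner - 1))
                yield_to(owner - 1);

        for (;;) {
                int old = load_lock_word(lock); /* NIAI-hinted on s390 */

                /* Try to get the lock if it is free. */
                if (old & Q_LOCK_CPU_MASK)
                        continue;
                int new = (old & Q_TAIL_MASK) | cpu_nr;
                if (cmpxchg_lock_word(lock, old, new))
                        return;         /* got the lock */
        }
}
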
258 owner = READ_ONCE(lp->lock);
259 /* Try to get the lock if it is free. */
261 if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
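
The trylock fragments show the simplest path: a bounded number of attempts, each a plain read followed by a 0-to-CPU-number cmpxchg, with no queueing and no yielding. A sketch, with SPIN_RETRY standing in for the kernel's spin_retry tunable:

#define SPIN_RETRY 1000

static bool spin_trylock_retry(_Atomic int *lock, int cpu_nr)
{
        for (int count = SPIN_RETRY; count > 0; count--) {
                int old = atomic_load_explicit(lock, memory_order_relaxed);

                /* Try to get the lock if it is completely free. */
                if (old == 0 &&
                    atomic_compare_exchange_strong(lock, &old, cpu_nr))
                        return true;
        }
        return false;
}
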
304 /* Got the lock */
317 cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
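
The final fragment extracts the owner CPU from the lock word; it likely belongs to the relax/backoff path, where a spinning CPU donates its time slice to an owner whose virtual CPU has been preempted by the hypervisor. A sketch using the hooks declared earlier:

static void spin_relax(_Atomic int *lock)
{
        int cpu = atomic_load_explicit(lock, memory_order_relaxed)
                  & Q_LOCK_CPU_MASK;

        if (!cpu)
                return;                 /* nobody holds the lock */
        if (vcpu_is_preempted(cpu - 1))
                yield_to(cpu - 1);      /* hypothetical hooks, see above */
}
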