Lines Matching defs:lock

11 * Use the EH=1 hint for accesses that result in the lock being acquired.
12 * The hardware is supposed to optimise this pattern by holding the lock
31 * Put a speculation barrier after testing the lock/node and finding it
42 * Execute a miso instruction after passing the MCS lock ownership to the
55 * This executes miso after an unlock of the lock word, having ownership
67 * the lock field.
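
Taken together, the matched comment lines describe where the tuning hooks sit in a spin-wait and hand-off sequence. The snippet below is a placement sketch only: spec_barrier(), miso() and node->locked are hypothetical stand-ins chosen to mirror the comments, not the real primitives or field names from this header.

/* Hypothetical placement sketch based on the comments matched above. */
while (READ_ONCE(lock->val)) {		/* test the lock and find it busy... */
	spec_barrier();			/* ...then barrier speculation before spinning again */
	cpu_relax();
}

WRITE_ONCE(node->locked, 1);		/* pass MCS lock ownership to the next waiter */
miso();					/* execute miso after the hand-off / unlock */
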
71 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
73 return READ_ONCE(lock->val);
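
Assembled, the two matched lines above are the whole predicate: a plain READ_ONCE of the lock word, non-zero meaning locked and/or contended (sketch with the braces restored).

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	return READ_ONCE(lock->val);
}
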
76 static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
78 return !lock.val;
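
The by-value check is its complement; assembled sketch of the matched lines:

static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !lock.val;
}
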
81 static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
83 return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK);
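
Contention is reported when any CPU is recorded in the tail field of the lock word; assembled sketch:

static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK);
}
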
88 /* XXX: make this use lock value in paca like simple spinlocks? */
92 static __always_inline int __queued_spin_trylock_nosteal(struct qspinlock *lock)
107 : "r" (&lock->val), "r" (new),
114 static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
131 : "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
138 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
141 return __queued_spin_trylock_nosteal(lock);
143 return __queued_spin_trylock_steal(lock);
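
The two return statements sit on opposite arms of a compile-time choice between the variants; the selector shown below is an assumption, since the matched lines contain only the returns.

static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	/* assumed compile-time switch; the listing shows only the two returns */
	if (!_Q_SPIN_TRY_LOCK_STEAL)
		return __queued_spin_trylock_nosteal(lock);
	else
		return __queued_spin_trylock_steal(lock);
}
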
146 void queued_spin_lock_slowpath(struct qspinlock *lock);
148 static __always_inline void queued_spin_lock(struct qspinlock *lock)
150 if (!queued_spin_trylock(lock))
151 queued_spin_lock_slowpath(lock);
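
The fast path assembles directly from the matched lines: attempt the trylock, and fall into the out-of-line slowpath only on failure (braces restored).

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	if (!queued_spin_trylock(lock))
		queued_spin_lock_slowpath(lock);
}
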
154 static inline void queued_spin_unlock(struct qspinlock *lock)
156 smp_store_release(&lock->locked, 0);
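
Unlock is a single release store that clears the locked byte; assembled sketch of the matched lines.

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}
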