Lines Matching refs:lock

17 	struct qspinlock *lock;
21 u8 locked; /* 1 if lock acquired */
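
The matches at lines 17 and 21 appear to be fields of the per-CPU queue node (the later matches use qnode->lock and node->locked). A minimal sketch of how those two fields might sit in the node and lock word; every other field name and the layout itself are assumptions for illustration, not taken from the listing.

    #include <stdatomic.h>
    #include <stdint.h>

    struct qspinlock {
            _Atomic uint32_t val;   /* locked bit, policy flags, tail CPU */
    };

    /* Per-CPU MCS queue node; only "lock" and "locked" appear in the
     * matches above, the remaining fields are assumed. */
    struct qnode {
            struct qnode *next;     /* next waiter in the queue */
            struct qspinlock *lock; /* lock this node is queued on (line 17) */
            uint8_t locked;         /* 1 if lock acquired (line 21) */
    };
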
116 * Try to acquire the lock if it was not already locked. If the tail matches
119 * This is used by the head of the queue to acquire the lock and clean up
122 static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail)
132 /* Test whether the lock tail == mytail */
138 /* If the lock tail matched, then clear it, otherwise leave it. */
145 : "r" (&lock->val), "r"(tail), "r" (newval),
161 static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail)
175 : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK)
181 static __always_inline u32 set_mustq(struct qspinlock *lock)
191 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
197 static __always_inline u32 clear_mustq(struct qspinlock *lock)
207 : "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
213 static __always_inline bool try_set_sleepy(struct qspinlock *lock, u32 old)
229 : "r" (&lock->val), "r"(old), "r" (new)
235 static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val)
241 try_set_sleepy(lock, val);
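
set_mustq()/clear_mustq() and try_set_sleepy()/seen_sleepy_owner() all poke policy bits in the same lock word. A sketch of their likely shapes, assuming the qspinlock sketch above and made-up flag values; the real bit values and any extra bookkeeping in seen_sleepy_owner() are not shown in the listing.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define Q_MUST_Q_VAL 0x100u  /* assumed: waiters must queue, stop stealing */
    #define Q_SLEEPY_VAL 0x200u  /* assumed: a preempted owner was observed */

    static uint32_t set_mustq(struct qspinlock *lock)
    {
            /* unconditional atomic OR, returning the previous word */
            return atomic_fetch_or_explicit(&lock->val, Q_MUST_Q_VAL,
                                            memory_order_relaxed);
    }

    static uint32_t clear_mustq(struct qspinlock *lock)
    {
            return atomic_fetch_and_explicit(&lock->val, ~Q_MUST_Q_VAL,
                                             memory_order_relaxed);
    }

    /* Set the sleepy flag only if the word still equals "old", so the
     * hint is not written over a lock word that has since changed. */
    static bool try_set_sleepy(struct qspinlock *lock, uint32_t old)
    {
            return atomic_compare_exchange_strong_explicit(&lock->val, &old,
                            old | Q_SLEEPY_VAL,
                            memory_order_relaxed, memory_order_relaxed);
    }

    static void seen_sleepy_owner(struct qspinlock *lock, uint32_t val)
    {
            /* additional bookkeeping in the real function is omitted */
            if (!(val & Q_SLEEPY_VAL))
                    try_set_sleepy(lock, val);
    }
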
260 static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu)
276 if (qnode->lock == lock)
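
get_tail_qnode() turns the CPU number encoded in the tail back into that CPU's queue node; the match at line 276 is the `qnode->lock == lock` test that picks the right node when the previous CPU is queued on several locks at different interrupt levels. A sketch assuming a small per-CPU node array; the array, its size and MAX_NODES are stand-ins.

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_NODES       4     /* assumed nesting: task/softirq/irq/nmi */
    #define SKETCH_NR_CPUS  64    /* stand-in for the real CPU count */

    struct qspinlock;             /* only compared by address here */

    struct qnode {
            struct qnode *next;
            struct qspinlock *lock;
            uint8_t locked;
    };

    struct qnodes {
            int count;
            struct qnode nodes[MAX_NODES];
    };

    /* Stand-in for the kernel's per-CPU qnodes storage. */
    static struct qnodes qnodes_sketch[SKETCH_NR_CPUS];

    static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu)
    {
            struct qnodes *qns = &qnodes_sketch[prev_cpu];

            for (int idx = 0; idx < MAX_NODES; idx++) {
                    struct qnode *qnode = &qns->nodes[idx];

                    if (qnode->lock == lock)
                            return qnode;
            }
            return NULL;  /* not expected once the tail has been published */
    }
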
284 static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
306 seen_sleepy_owner(lock, val);
310 * Read the lock word after sampling the yield count. On the other side
318 if (READ_ONCE(lock->val) == val) {
320 clear_mustq(lock);
323 set_mustq(lock);
337 static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
339 return __yield_to_locked_owner(lock, val, paravirt, false);
343 static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
350 return __yield_to_locked_owner(lock, val, paravirt, mustq);
376 static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
389 * propagate sleepy to us, so check the lock in that case too.
392 u32 val = READ_ONCE(lock->val);
400 * ceases to occur, even if the lock remains
407 preempted = yield_to_locked_owner(lock, val, paravirt);
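
The yield helpers are the paravirt core of this code: if the lock owner's vCPU looks preempted, the waiter donates its timeslice to the owner rather than spinning. The sketch below shows only the shape of yield_to_locked_owner(); the owner-CPU field, the stubbed vcpu_*() helpers and hcall_yield_to() are stand-ins for the real paravirt primitives. yield_head_to_locked_owner() additionally clears the must-queue bit around the yield and restores it afterwards (the clear_mustq()/set_mustq() calls at lines 320 and 323), presumably so the lock can still be taken while the head waiter has given up its timeslice, and yield_to_prev() (line 376) applies the same pattern to the previous queued waiter instead of the owner.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define Q_LOCKED_VAL      0x1u
    #define Q_OWNER_CPU_MASK  0xff00u       /* assumed owner-CPU field */
    #define Q_OWNER_CPU_SHIFT 8

    struct qspinlock { _Atomic uint32_t val; };

    /* Trivial stubs so the sketch compiles; the real primitives inspect
     * the owner vCPU's preemption state / yield count and hand our
     * timeslice to it through a hypervisor call. */
    static bool vcpu_is_preempted(int cpu) { (void)cpu; return false; }
    static uint32_t vcpu_yield_count(int cpu) { (void)cpu; return 0; }
    static void hcall_yield_to(int cpu, uint32_t yc) { (void)cpu; (void)yc; }

    /* Returns true if we yielded, so the caller does not count the time
     * spent preempted against its spin budget. */
    static bool yield_to_locked_owner(struct qspinlock *lock, uint32_t val,
                                      bool paravirt)
    {
            int owner;
            uint32_t yield_count;

            if (!paravirt || !(val & Q_LOCKED_VAL))
                    return false;

            owner = (val & Q_OWNER_CPU_MASK) >> Q_OWNER_CPU_SHIFT;
            if (!vcpu_is_preempted(owner))
                    return false;

            yield_count = vcpu_yield_count(owner);

            /* Re-read the lock word *after* sampling the yield count (the
             * ordering the comment at line 310 is about): if the word has
             * changed, the owner may already have released the lock and
             * yielding to it would be wasted work. */
            atomic_thread_fence(memory_order_acquire);
            if (atomic_load_explicit(&lock->val,
                                     memory_order_relaxed) != val)
                    return false;

            hcall_yield_to(owner, yield_count);
            return true;
    }
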
456 static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
468 /* Attempt to steal the lock */
473 val = READ_ONCE(lock->val);
480 if (__queued_spin_trylock_steal(lock))
484 preempted = yield_to_locked_owner(lock, val, paravirt);
498 if (try_set_sleepy(lock, val))
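
try_to_steal_lock() tries to take the lock ahead of the queue for some bounded number of iterations, giving up as soon as the must-queue bit appears. A sketch of that loop, reusing the types, masks and yield helper from the sketches above; STEAL_SPINS is a stand-in for whatever tunable bounds the real loop, and the sleepy handling at line 498 is reduced to a comment.

    #define STEAL_SPINS 256         /* stand-in for the real tunable */

    static bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
    {
            int iters = 0;

            do {
                    uint32_t val = atomic_load_explicit(&lock->val,
                                                        memory_order_relaxed);
                    bool preempted = false;

                    if (val & Q_MUST_Q_VAL)
                            break;  /* a waiter demands fairness: go queue */

                    if (!(val & Q_LOCKED_VAL)) {
                            /* Steal: set the locked bit while leaving any
                             * published tail in place. */
                            if (atomic_compare_exchange_strong_explicit(
                                            &lock->val, &val,
                                            val | Q_LOCKED_VAL,
                                            memory_order_acquire,
                                            memory_order_relaxed))
                                    return true;
                    } else {
                            /* Owner holds the lock: maybe donate our
                             * timeslice to it (and possibly mark the lock
                             * sleepy, as at line 498). */
                            preempted = yield_to_locked_owner(lock, val,
                                                              paravirt);
                    }

                    /* Time spent preempted does not use up the budget. */
                    if (!preempted)
                            iters++;
            } while (iters < STEAL_SPINS);

            return false;
    }
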
526 static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
542 while (!queued_spin_trylock(lock))
556 node->lock = lock;
568 old = publish_tail_cpu(lock, tail);
576 struct qnode *prev = get_tail_qnode(lock, prev_cpu);
581 /* Wait for mcs node lock to be released */
586 if (yield_to_prev(lock, node, prev_cpu, paravirt))
592 smp_rmb(); /* acquire barrier for the mcs lock */
606 /* We're at the head of the waitqueue, wait for the lock. */
612 val = READ_ONCE(lock->val);
628 if (try_set_sleepy(lock, val))
634 preempted = yield_head_to_locked_owner(lock, val, paravirt);
652 set_mustq(lock);
660 old = trylock_clean_tail(lock, tail);
681 * here because the acquirer is only accessing the lock word, and
682 * the acquire barrier we took the lock with orders that update vs
684 * acquire barrier for mcs lock, above.
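
Taken together, the matches from lines 526 to 684 are the MCS queue itself: initialise a per-CPU node, publish this CPU as the tail, link behind any previous waiter and wait for its handoff, then spin on the lock word as queue head until trylock_clean_tail() succeeds, and finally pass the node lock on if someone queued behind us. A condensed sketch of that flow under the same assumptions as above; this_cpu_node(), my_cpu(), encode_tail_cpu(), decode_tail_cpu() and get_tail_node() are assumed helpers (declared only), and the paravirt yields and must-queue escalation are reduced to comments.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct mcs_node {
            _Atomic(struct mcs_node *) next;
            struct qspinlock *lock;
            _Atomic uint8_t locked; /* set by the previous waiter on handoff */
    };

    extern struct mcs_node *this_cpu_node(void);            /* assumed */
    extern struct mcs_node *get_tail_node(struct qspinlock *lock, int cpu);
    extern int my_cpu(void);
    extern uint32_t encode_tail_cpu(int cpu);
    extern int decode_tail_cpu(uint32_t val);

    static void queued_spin_lock_mcs_queue(struct qspinlock *lock,
                                           bool paravirt)
    {
            struct mcs_node *node = this_cpu_node();
            uint32_t tail = encode_tail_cpu(my_cpu());
            uint32_t old, val;

            (void)paravirt;         /* yields elided in this sketch */

            atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
            node->lock = lock;
            atomic_store_explicit(&node->locked, 0, memory_order_relaxed);

            /* Become the new tail; "old" says whether anyone was queued. */
            old = publish_tail_cpu(lock, tail);

            if (old & Q_TAIL_CPU_MASK) {
                    struct mcs_node *prev =
                            get_tail_node(lock, decode_tail_cpu(old));

                    /* Link behind the previous waiter and wait for it to
                     * release the MCS node lock to us (the acquire load is
                     * the "acquire barrier for the mcs lock", line 592);
                     * this is where yield_to_prev() runs under paravirt. */
                    atomic_store_explicit(&prev->next, node,
                                          memory_order_release);
                    while (!atomic_load_explicit(&node->locked,
                                                 memory_order_acquire))
                            ;
            }

            /* Head of the queue: wait for the lock to be released, then
             * take it.  (This is where yield_head_to_locked_owner() and
             * the set_mustq() escalation happen in the real code.) */
            for (;;) {
                    val = atomic_load_explicit(&lock->val,
                                               memory_order_relaxed);
                    if (val & Q_LOCKED_VAL)
                            continue;

                    old = trylock_clean_tail(lock, tail);
                    if (!(old & Q_LOCKED_VAL))
                            break;          /* lock acquired */
            }

            if ((old & Q_TAIL_CPU_MASK) != tail) {
                    /* Someone queued behind us: wait for the link, then
                     * make them the new head by releasing the node lock. */
                    struct mcs_node *next;

                    while (!(next = atomic_load_explicit(&node->next,
                                                memory_order_acquire)))
                            ;
                    atomic_store_explicit(&next->locked, 1,
                                          memory_order_release);
            }
    }
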
703 void queued_spin_lock_slowpath(struct qspinlock *lock)
711 if (try_to_steal_lock(lock, true)) {
715 queued_spin_lock_mcs_queue(lock, true);
717 if (try_to_steal_lock(lock, false)) {
721 queued_spin_lock_mcs_queue(lock, false);
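
The entry point at line 703 just stitches the two phases together, in two flavours as shown by the true/false calls at lines 711 to 721. A minimal sketch, reusing the sketches above; is_shared_processor() is a stand-in for whatever runtime check selects the paravirt variant.

    extern bool is_shared_processor(void);    /* stand-in predicate */

    void queued_spin_lock_slowpath(struct qspinlock *lock)
    {
            if (is_shared_processor()) {
                    /* paravirt: yield to preempted owners and waiters */
                    if (try_to_steal_lock(lock, true))
                            return;
                    queued_spin_lock_mcs_queue(lock, true);
            } else {
                    if (try_to_steal_lock(lock, false))
                            return;
                    queued_spin_lock_mcs_queue(lock, false);
            }
    }

Because the helpers take paravirt as a constant argument and are marked __always_inline in the listing, each branch can be specialised by the compiler, so the bare-metal path carries none of the yield code.
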
739 static DEFINE_MUTEX(lock);
742 * The lock slow path has a !maybe_stealers case that can assume
748 mutex_lock(&lock);
762 mutex_unlock(&lock);