Lines matching refs: lock

30  * not running. The one lock stealing attempt allowed at slowpath entry
57 * Hybrid PV queued/unfair lock
60 * it will be called once when a lock waiter enters the PV slowpath before
64 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
65 * When that bit becomes visible to the incoming waiters, no lock stealing
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
69 * and hence disabling lock stealing.
71 * When the pending bit isn't set, the lock waiters will stay in the unfair
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
73 * case, the lock waiters will enter the queued mode slowpath trying to
76 * This hybrid PV queued/unfair lock combines the best attributes of a
77 * queued lock (no lock starvation) and an unfair lock (good performance
81 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
84 * Stay in unfair lock mode as long as queued mode waiters are
88 int val = atomic_read(&lock->val);
91 (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
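
Putting the matched pieces together, the trylock is a short spin that steals the lock only while both the locked and pending bits are clear, and bails out to the queued slowpath once the queue empties or the pending bit appears. A minimal sketch consistent with the lines above, assuming the standard qspinlock bit masks (_Q_LOCKED_PENDING_MASK, _Q_TAIL_MASK, _Q_PENDING_MASK); the event counters are omitted:

/* Illustrative sketch, not verbatim file contents. */
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);

		/* Lock free and pending clear: try to steal it. */
		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0))
			return true;

		/* Queue empty or pending set: fall back to the queued slowpath. */
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}
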
106 * is actively spinning on the lock and no lock stealing is allowed.
109 static __always_inline void set_pending(struct qspinlock *lock)
111 WRITE_ONCE(lock->pending, 1);
117 * lock just to be sure that it will get it.
119 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
121 return !READ_ONCE(lock->locked) &&
122 (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
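
When the pending bit occupies its own byte in the lock word (_Q_PENDING_BITS == 8), the two helpers above reduce to a plain byte store and an acquire cmpxchg on the combined locked+pending halfword. A sketch, assuming the locked_pending field and the _Q_PENDING_VAL/_Q_LOCKED_VAL constants from the qspinlock definitions:

/* Illustrative sketch, not verbatim file contents. */
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check alone is not a memory barrier, so an acquire
 * cmpxchg on the locked+pending halfword both takes the lock and clears
 * the pending bit in a single step.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	return !READ_ONCE(lock->locked) &&
	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
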
126 static __always_inline void set_pending(struct qspinlock *lock)
128 atomic_or(_Q_PENDING_VAL, &lock->val);
131 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
133 int val = atomic_read(&lock->val);
146 val = atomic_cmpxchg_acquire(&lock->val, old, new);
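
Without a separately addressable pending byte, the same trylock falls back to a compare-and-swap loop over the whole lock word; the two matched lines above are the initial read and the cmpxchg of that loop. A sketch of its likely shape, assuming the usual _Q_LOCKED_MASK, _Q_PENDING_MASK and _Q_LOCKED_VAL constants:

/* Illustrative sketch, not verbatim file contents. */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		/* Someone else holds the lock: give up. */
		if (val & _Q_LOCKED_MASK)
			break;

		/* Try to clear the pending bit and set the locked bit. */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg_acquire(&lock->val, old, new);

		if (val == old)
			return 1;
	}
	return 0;
}
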
172 struct qspinlock *lock;
212 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
214 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
220 if (!cmpxchg(&he->lock, NULL, lock)) {
223 return &he->lock;
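
pv_hash() publishes a (lock, node) pair in an open-addressed hash table so that the eventual unlocker can find the halted queue-head vCPU. A sketch of the probing loop, assuming a global pv_lock_hash array of pv_hash_entry and a cache-line grouping constant (here called PV_HE_PER_LINE) as in the kernel's table layout:

/* Illustrative sketch, not verbatim file contents. */
#define for_each_hash_entry(he, offset, hash)					\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
	     offset < (1 << pv_lock_hash_bits);					\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;

	for_each_hash_entry(he, offset, hash) {
		/* Claim the first empty slot atomically. */
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			return &he->lock;
		}
	}
	/* The table is sized so that a free entry must exist. */
	BUG();
}
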
229 * This is guaranteed by ensuring every blocked lock only ever consumes
233 * The single entry is guaranteed by having the lock owner unhash
239 static struct pv_node *pv_unhash(struct qspinlock *lock)
241 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
246 if (READ_ONCE(he->lock) == lock) {
248 WRITE_ONCE(he->lock, NULL);
256 * having the lock owner do the unhash -- IFF the unlock sees the
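
The complementary lookup walks the same probe sequence, returns the hashed pv_node and frees the slot; falling off the end of the table would mean the lock was never hashed, which the surrounding comments rule out. A sketch reusing the probing macro above:

/* Illustrative sketch, not verbatim file contents. */
static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			/* Release the slot for reuse. */
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * An unhashed lock can't get here: the lock owner only unhashes
	 * when the unlock actually saw _Q_SLOW_VAL.
	 */
	BUG();
}
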
331 * to hash this lock.
340 * MCS lock will be released soon.
354 * Called after setting next->locked = 1 when we're the lock owner.
360 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
385 * Put the lock into the hash table and set the _Q_SLOW_VAL.
391 WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
392 (void)pv_hash(lock, pn);
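
pv_kick_node() is run by the lock holder on its MCS successor at unlock time. If that successor's vCPU has already halted, the holder hashes the lock and flips the lock byte to _Q_SLOW_VAL on its behalf, so the next unlock knows to consult the hash table. A condensed sketch, assuming the pv_node state values (vcpu_halted, vcpu_hashed) used elsewhere in the file and a plain full-barrier cmpxchg for the state transition:

/* Illustrative sketch, not verbatim file contents. */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	/*
	 * Only act if the successor really halted; otherwise it is running,
	 * will observe next->locked == 1 and proceed on its own.
	 */
	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 * The same vCPU checks _Q_SLOW_VAL and the hash table later at
	 * unlock time, so no atomic instruction is needed for the store.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}
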
396 * Wait for l->locked to become clear and acquire the lock;
400 * The current value of the lock will be returned for additional processing.
403 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
430 * Set the pending bit in the active lock spinning loop to
431 * disable lock stealing before attempting to acquire the lock.
433 set_pending(lock);
435 if (trylock_clear_pending(lock))
439 clear_pending(lock);
443 lp = pv_hash(lock, pn);
456 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
458 * The lock was free and now we own the lock.
459 * Change the lock value back to _Q_LOCKED_VAL
462 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
470 pv_wait(&lock->locked, _Q_SLOW_VAL);
473 * Because of lock stealing, the queue head vCPU may not be
474 * able to acquire the lock before it has to wait again.
485 return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
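
Read together, the matched lines of pv_wait_head_or_lock() describe the queue head alternating between an active spin with the pending bit set (so no one can steal) and a pv_wait() on the lock byte, hashing the lock and publishing _Q_SLOW_VAL at most once before the first halt. A condensed sketch with the event counters and the wait-early state updates left out, assuming SPIN_THRESHOLD and the vcpu_* states from the rest of the file:

/* Illustrative sketch, not verbatim file contents. */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int loop;

	/* pv_kick_node() may have hashed the lock on our behalf already. */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	for (;;) {
		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) {	/* hash at most once */
			lp = pv_hash(lock, pn);

			/*
			 * Hash before setting _Q_SLOW_VAL so that an unlocker
			 * seeing _Q_SLOW_VAL is guaranteed to see the hash
			 * entry as well.
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and unhash the table.
				 */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

gotlock:
	/* The lock is held; report a value with the locked bit set. */
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}
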
503 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
509 "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
510 (unsigned long)lock, atomic_read(&lock->val));
517 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
527 node = pv_unhash(lock);
531 * release the lock.
533 smp_store_release(&lock->locked, 0);
536 * At this point the memory pointed at by lock can be freed/reused,
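
The slow unlock path ties the remaining fragments together: verify the lock byte really was _Q_SLOW_VAL, order the unhash after that read, release the lock, then kick the vCPU recorded in the hash table. A sketch close to the matched lines (the warning condition is simplified here):

/* Illustrative sketch, not verbatim file contents. */
__visible __lockfunc void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(1, "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg gives no ordering, so order the node read in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 */
	smp_rmb();

	/* Look up the blocked node and unhash it. */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&lock->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused;
	 * the pv_node is still valid and can be used to kick the vCPU.
	 * Kicking a vCPU that is not actually halted is harmless.
	 */
	pv_kick(node->cpu);
}
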
547 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
553 * unhash. Otherwise it would be possible to have multiple @lock
556 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
560 __pv_queued_spin_unlock_slowpath(lock, locked);
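
And the unlock fast path: a single release cmpxchg of the lock byte, with the slow path taken only when the byte is not plain _Q_LOCKED_VAL, i.e. a hashed waiter has switched it to _Q_SLOW_VAL. A sketch consistent with the matched lines:

/* Illustrative sketch, not verbatim file contents. */
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}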