Lines matching refs:owner (cross-reference hits for the owner field in the kernel's kernel/locking/mutex.c; each entry shows its line number in that file followed by the matching source line)

48 	atomic_long_set(&lock->owner, 0);
60 * @owner: contains: 'struct task_struct *' to the current lock owner,
81 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
84 static inline struct task_struct *__owner_task(unsigned long owner)
86 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
95 static inline unsigned long __owner_flags(unsigned long owner)
97 return owner & MUTEX_FLAGS;
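
The helpers above encode the whole lock state in a single word: the aligned task_struct pointer of the owner in the high bits, and flag bits (MUTEX_FLAG_WAITERS, MUTEX_FLAG_HANDOFF, MUTEX_FLAG_PICKUP, together MUTEX_FLAGS) in the low bits that pointer alignment leaves free. A minimal userspace sketch of the same packing, using C11 atomics in place of the kernel's atomic_long_t, with struct task as a stand-in for struct task_struct:

#include <stdatomic.h>
#include <stdbool.h>

#define MUTEX_FLAG_WAITERS 0x01
#define MUTEX_FLAG_HANDOFF 0x02
#define MUTEX_FLAG_PICKUP  0x04
#define MUTEX_FLAGS        0x07

struct task;                            /* stand-in for struct task_struct */

struct mutex {
        atomic_ulong owner;             /* task pointer | flag bits */
};

/* Strip the flag bits to recover the owning task. */
static struct task *owner_task(unsigned long owner)
{
        return (struct task *)(owner & ~MUTEX_FLAGS);
}

/* Keep only the flag bits. */
static unsigned long owner_flags(unsigned long owner)
{
        return owner & MUTEX_FLAGS;
}
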
105 unsigned long owner, curr = (unsigned long)current;
107 owner = atomic_long_read(&lock->owner);
109 unsigned long flags = __owner_flags(owner);
110 unsigned long task = owner & ~MUTEX_FLAGS;
129 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
136 return __owner_task(owner);
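
The loop shown at lines 105-136 is the heart of the trylock path: read the owner word, and either install the current task (preserving the flag bits) with an acquire cmpxchg, or report who holds the lock so the caller can spin on it. A simplified rendering, reusing the sketch types above and eliding the real code's HANDOFF/PICKUP handling:

/* Returns NULL if we took the lock, else the task that owns it. */
static struct task *trylock_or_owner(struct mutex *lock, struct task *curr)
{
        unsigned long owner = atomic_load(&lock->owner);

        for (;;) {
                unsigned long flags = owner_flags(owner);
                unsigned long task  = owner & ~MUTEX_FLAGS;

                if (task)
                        return owner_task(owner);       /* owned: spin on this */

                /*
                 * Acquire pairs with the release store in unlock, so the
                 * critical section cannot float above the acquisition.
                 * On failure, 'owner' is reloaded and we retry.
                 */
                if (atomic_compare_exchange_weak_explicit(&lock->owner,
                                &owner, (unsigned long)curr | flags,
                                memory_order_acquire, memory_order_relaxed))
                        return NULL;                    /* lock is ours */
        }
}
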
171 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
181 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
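
Lines 171 and 181 are the uncontended fast paths: locking is one acquire cmpxchg from 0 to the current task, unlocking one release cmpxchg back to 0. The unlock cmpxchg fails on purpose whenever any flag bit is set, which diverts contended unlocks to the slowpath. Sketch under the same assumptions:

static bool trylock_fast(struct mutex *lock, struct task *curr)
{
        unsigned long zero = 0;

        /* Only succeeds if there is no owner and no flags at all. */
        return atomic_compare_exchange_strong_explicit(&lock->owner, &zero,
                        (unsigned long)curr,
                        memory_order_acquire, memory_order_relaxed);
}

static bool unlock_fast(struct mutex *lock, struct task *curr)
{
        unsigned long expected = (unsigned long)curr;

        /* A set WAITERS/HANDOFF bit makes this fail -> take the slowpath. */
        return atomic_compare_exchange_strong_explicit(&lock->owner,
                        &expected, 0UL,
                        memory_order_release, memory_order_relaxed);
}
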
187 atomic_long_or(flag, &lock->owner);
192 atomic_long_andnot(flag, &lock->owner);
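
Lines 187 and 192 flip individual flag bits with atomic OR and AND-NOT; this is safe concurrently with lock/unlock because the flags occupy bits an aligned pointer never uses. In C11 terms, with atomic_fetch_and on the complement standing in for the kernel's atomic_long_andnot():

static void set_owner_flag(struct mutex *lock, unsigned long flag)
{
        atomic_fetch_or(&lock->owner, flag);    /* e.g. MUTEX_FLAG_WAITERS */
}

static void clear_owner_flag(struct mutex *lock, unsigned long flag)
{
        atomic_fetch_and(&lock->owner, ~flag);
}
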
233 unsigned long owner = atomic_long_read(&lock->owner);
238 MUTEX_WARN_ON(__owner_task(owner) != current);
239 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
241 new = (owner & MUTEX_FLAG_WAITERS);
246 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
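
Lines 233-246 implement handoff: instead of releasing the lock to a free-for-all, the owner writes the top waiter's task pointer together with MUTEX_FLAG_PICKUP, so only that waiter may take the lock and opportunistic spinners cannot steal it. A simplified sketch (the real code also tolerates a NULL task and runs with the wait list held stable):

/* Pass ownership directly to 'top_waiter' rather than releasing. */
static void handoff(struct mutex *lock, struct task *top_waiter)
{
        unsigned long owner = atomic_load(&lock->owner);

        for (;;) {
                /* Keep WAITERS, install the new owner, mark it PICKUP. */
                unsigned long new = (owner & MUTEX_FLAG_WAITERS) |
                                    (unsigned long)top_waiter |
                                    MUTEX_FLAG_PICKUP;

                if (atomic_compare_exchange_weak_explicit(&lock->owner,
                                &owner, new,
                                memory_order_release, memory_order_relaxed))
                        return;
        }
}
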
332 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
346 * Look out! "owner" is an entirely speculative pointer access and not
352 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
359 while (__mutex_owner(lock) == owner) {
361 * Ensure we emit the owner->on_cpu, dereference _after_
362 * checking lock->owner still matches owner. And we already
373 if (!owner_on_cpu(owner) || need_resched()) {
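
mutex_spin_on_owner() (lines 352-373) spins only while two things stay true: lock->owner still names the same task, and that task is actually running on a CPU; once the owner schedules out, or we need to reschedule ourselves, spinning is wasted work and the caller should block. A sketch of that shape; owner_on_cpu() and need_resched() are hypothetical stand-ins here for the kernel predicates of the same names, and the kernel additionally relies on RCU to keep the speculative owner pointer (see the warning at line 346) safe to dereference:

extern bool owner_on_cpu(struct task *owner);   /* hypothetical stand-in */
extern bool need_resched(void);                 /* hypothetical stand-in */

/* True if the owner released the lock while we spun; false = give up. */
static bool spin_on_owner(struct mutex *lock, struct task *owner)
{
        while (owner_task(atomic_load(&lock->owner)) == owner) {
                /*
                 * Check on_cpu only after re-reading lock->owner: it is the
                 * owner-still-matches check that makes looking at 'owner'
                 * meaningful at all.
                 */
                if (!owner_on_cpu(owner) || need_resched())
                        return false;
                /* kernel: cpu_relax() between polls */
        }
        return true;
}
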
394 struct task_struct *owner;
407 owner = __mutex_owner(lock);
408 if (owner)
409 retval = owner_on_cpu(owner);
412 * If lock->owner is not set, the mutex has been released. Return true
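
mutex_can_spin_on_owner() (lines 394-412) is the cheaper up-front check: if there is an owner, spinning is only worth starting when that owner is on a CPU; if the owner field is clear, the lock has just been released and returning true lets the caller grab it with a trylock. Sketch under the same assumptions:

static bool can_spin_on_owner(struct mutex *lock)
{
        struct task *owner = owner_task(atomic_load(&lock->owner));
        bool retval = true;

        if (owner)
                retval = owner_on_cpu(owner);

        /* owner == NULL: lock was released, a trylock should succeed fast. */
        return retval;
}
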
422 * We try to spin for acquisition when we find that the lock owner
424 * need to reschedule. The rationale is that if the lock owner is
437 * with the spinner at the head of the OSQ, if present, until the owner is
458 * MCS (queued) lock first before spinning on the owner field.
465 struct task_struct *owner;
468 owner = __mutex_trylock_or_owner(lock);
469 if (!owner)
473 * There's an owner, wait for it to either
476 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
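
Putting the pieces together, the loop at lines 465-476 alternates trylock attempts with spinning on whoever owns the lock; the OSQ queueing mentioned at line 458 and the ww_ctx checks of the real function are elided in this sketch:

/* True if the lock was acquired by spinning; false = go sleep instead. */
static bool optimistic_spin(struct mutex *lock, struct task *curr)
{
        if (!can_spin_on_owner(lock))
                return false;

        for (;;) {
                struct task *owner = trylock_or_owner(lock, curr);

                if (!owner)
                        return true;            /* acquired */

                /* Owner exists: wait for release or for it to sleep. */
                if (!spin_on_owner(lock, owner))
                        return false;
        }
}
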
596 * race and wound us here since they can't have a valid owner
784 * race and wound us here, since they can't have a valid owner
910 unsigned long owner;
918 * Except when HANDOFF, in that case we must not clear the owner field,
921 owner = atomic_long_read(&lock->owner);
923 MUTEX_WARN_ON(__owner_task(owner) != current);
924 MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
926 if (owner & MUTEX_FLAG_HANDOFF)
929 if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
930 if (owner & MUTEX_FLAG_WAITERS)
951 if (owner & MUTEX_FLAG_HANDOFF)
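
Finally, lines 910-951 show the unlock slowpath: clear the task-pointer bits while preserving the flags, except when MUTEX_FLAG_HANDOFF is set, in which case the owner field must stay intact and ownership is passed straight to the top waiter. A simplified sketch; wake_top_waiter() and pick_top_waiter() are hypothetical helpers standing in for the kernel's wait-list handling, and the kernel additionally sanity-checks that the caller really is the recorded owner:

extern void wake_top_waiter(struct mutex *lock);         /* hypothetical */
extern struct task *pick_top_waiter(struct mutex *lock); /* hypothetical */

static void unlock_slowpath(struct mutex *lock)
{
        unsigned long owner = atomic_load(&lock->owner);

        for (;;) {
                if (owner & MUTEX_FLAG_HANDOFF)
                        break;                  /* must not clear owner */

                /* Drop the task pointer, keep the flag bits. */
                if (atomic_compare_exchange_weak_explicit(&lock->owner,
                                &owner, owner_flags(owner),
                                memory_order_release, memory_order_relaxed)) {
                        if (owner & MUTEX_FLAG_WAITERS)
                                wake_top_waiter(lock);
                        return;
                }
        }

        handoff(lock, pick_top_waiter(lock));
}
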