Lines Matching defs:lock (kernel/locking/ww_mutex.h)

9 __ww_waiter_first(struct mutex *lock)
13 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
14 if (list_entry_is_head(w, &lock->wait_list, list))
21 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
24 if (list_entry_is_head(w, &lock->wait_list, list))
31 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
34 if (list_entry_is_head(w, &lock->wait_list, list))
41 __ww_waiter_last(struct mutex *lock)
45 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
46 if (list_entry_is_head(w, &lock->wait_list, list))
53 __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
55 struct list_head *p = &lock->wait_list;
58 __mutex_add_waiter(lock, waiter, p);
62 __ww_mutex_owner(struct mutex *lock)
64 return __mutex_owner(lock);
68 __ww_mutex_has_waiters(struct mutex *lock)
70 return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
73 static inline void lock_wait_lock(struct mutex *lock)
75 raw_spin_lock(&lock->wait_lock);
78 static inline void unlock_wait_lock(struct mutex *lock)
80 raw_spin_unlock(&lock->wait_lock);
83 static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
85 lockdep_assert_held(&lock->wait_lock);
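These helpers wrap the plain mutex wait_list in a NULL-terminated cursor API (first/next/prev/last), so the wound/wait algorithms further down can iterate waiters without caring about the backing structure. A minimal traversal sketch (my own illustration, not from the listing), assuming it runs under wait_lock as the assert helper above requires:

/* Sketch: walk every ww waiter, oldest first; runs under wait_lock. */
static void walk_ww_waiters(struct mutex *lock)
{
	struct mutex_waiter *w;

	lockdep_assert_wait_lock_held(lock);

	for (w = __ww_waiter_first(lock); w; w = __ww_waiter_next(lock, w)) {
		if (!w->ww_ctx)
			continue;	/* plain mutex waiter, not ww */
		/* ... inspect w->ww_ctx (e.g. its stamp) here ... */
	}
}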
94 __ww_waiter_first(struct rt_mutex *lock)
96 struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
103 __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
112 __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
121 __ww_waiter_last(struct rt_mutex *lock)
123 struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
130 __ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
136 __ww_mutex_owner(struct rt_mutex *lock)
138 return rt_mutex_owner(&lock->rtmutex);
142 __ww_mutex_has_waiters(struct rt_mutex *lock)
144 return rt_mutex_has_waiters(&lock->rtmutex);
147 static inline void lock_wait_lock(struct rt_mutex *lock)
149 raw_spin_lock(&lock->rtmutex.wait_lock);
152 static inline void unlock_wait_lock(struct rt_mutex *lock)
154 raw_spin_unlock(&lock->rtmutex.wait_lock);
157 static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
159 lockdep_assert_held(&lock->rtmutex.wait_lock);
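The rt_mutex flavour above mirrors the mutex one name for name. The header is compiled twice, and the shared algorithm code below is written once against MUTEX/MUTEX_WAITER tokens (hence struct MUTEX from line 277 on). The selection looks roughly like this, paraphrasing the header's own WW_RT guards:

#ifndef WW_RT			/* regular build: ww over struct mutex */
#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter
#else				/* WW_RT build: ww over struct rt_mutex */
#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter
#endif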
167 * It (the new transaction) makes a request for a lock being held
172 * An older transaction makes a request for a lock being held by
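Both algorithms order transactions by the stamp in their ww_acquire_ctx; a smaller (earlier) stamp means an older transaction. A sketch of the comparison and of what each policy does with it (older() is my own hypothetical helper, not a symbol from this header):

/* Hypothetical helper: is @a older (smaller stamp, wrap-safe) than @b? */
static bool older(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) < 0;
}

/*
 * Wait-Die:   a younger requester blocking on an older holder is killed
 *             (-EDEADLK); it must drop all its locks and start over.
 * Wound-Wait: an older requester wounds a younger holder, which backs
 *             off at its next lock attempt; the older requester waits.
 */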
277 __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
285 debug_mutex_wake_waiter(lock, waiter);
294 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
296 * Wound the lock holder if there are waiters with more important transactions
297 * than the lock holder's. Even if multiple waiters may wound the lock holder,
300 static bool __ww_mutex_wound(struct MUTEX *lock,
304 struct task_struct *owner = __ww_mutex_owner(lock);
306 lockdep_assert_wait_lock_held(lock);
343 * We just acquired @lock under @ww_ctx, if there are more important contexts
355 __ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
359 lockdep_assert_wait_lock_held(lock);
361 for (cur = __ww_waiter_first(lock); cur;
362 cur = __ww_waiter_next(lock, cur)) {
367 if (__ww_mutex_die(lock, cur, ww_ctx) ||
368 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
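Lines 355-368 are the post-acquire sweep: having just taken the lock, the new owner walks the wait list and resolves ordering conflicts against every waiting context. An annotated paraphrase (the comments are mine, details simplified):

/* Annotated paraphrase of __ww_mutex_check_waiters(). */
static void check_waiters_sketch(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {
		if (!cur->ww_ctx)
			continue;		/* not a ww waiter */
		/*
		 * Wait-Die: wake a younger sleeping waiter so it can back
		 * off; Wound-Wait: mark a younger context wounded. The
		 * first context acted on ends the sweep.
		 */
		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}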
374 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
378 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
380 ww_mutex_lock_acquired(lock, ctx);
383 * The lock->ctx update should be visible on all cores before
400 if (likely(!__ww_mutex_has_waiters(&lock->base)))
407 lock_wait_lock(&lock->base);
408 __ww_mutex_check_waiters(&lock->base, ctx);
409 unlock_wait_lock(&lock->base);
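Between setting the context (line 380) and the waiter check (line 400), the source publishes lock->ctx and issues a full memory barrier so that a concurrently queueing waiter cannot be missed. Paraphrased as one function (the ctx store really happens inside ww_mutex_lock_acquired()):

/* Paraphrase of ww_mutex_set_context_fastpath(). */
static void set_context_fastpath_sketch(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	lock->ctx = ctx;	/* really done in ww_mutex_lock_acquired() */

	/*
	 * Publish the ctx store before reading the WAITERS flag: a racing
	 * waiter then either sees lock->ctx and backs off by itself, or
	 * has already set MUTEX_FLAG_WAITERS and is caught below.
	 */
	smp_mb();

	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;		/* common case: nobody raced with us */

	/* Somebody queued concurrently: sweep them under wait_lock. */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}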
413 __ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
419 ww = container_of(lock, struct ww_mutex, base);
430 * Check the wound condition for the current lock acquire.
434 * Wait-Die: If we're trying to acquire a lock already held by an older
441 __ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
444 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
453 return __ww_mutex_kill(lock, ctx);
459 return __ww_mutex_kill(lock, ctx);
465 for (cur = __ww_waiter_prev(lock, waiter); cur;
466 cur = __ww_waiter_prev(lock, cur)) {
471 return __ww_mutex_kill(lock, ctx);
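Condensing lines 441-471: once a context already holds locks (acquired > 0), it must back off rather than wait behind any older context, whether that context owns the lock or sits ahead of it in the queue. A sketch reusing the hypothetical older() helper from above (the real code reads hold_ctx from the ww_mutex via container_of() and returns -EDEADLK through __ww_mutex_kill()):

/* Condensed form of the kill test; returns true where the real code
 * would return -EDEADLK via __ww_mutex_kill(). */
static bool must_back_off(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
			  struct ww_acquire_ctx *ctx,
			  struct ww_acquire_ctx *hold_ctx)
{
	struct MUTEX_WAITER *cur;

	if (!ctx->acquired)
		return false;		/* first lock: waiting is safe */

	if (ctx->wounded)
		return true;		/* Wound-Wait: we lost already */

	if (hold_ctx && older(hold_ctx, ctx))
		return true;		/* Wait-Die: owner is older */

	/* Wait-Die: also die if anyone queued ahead of us is older. */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {
		if (cur->ww_ctx && older(cur->ww_ctx, ctx))
			return true;
	}

	return false;
}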
479 * first. Such that older contexts are preferred to acquire the lock over
490 struct MUTEX *lock,
497 __ww_waiter_add(lock, waiter, NULL);
508 * may wound the lock holder.
510 for (cur = __ww_waiter_last(lock); cur;
511 cur = __ww_waiter_prev(lock, cur)) {
520 * die the moment it would acquire the lock.
523 int ret = __ww_mutex_kill(lock, ww_ctx);
535 __ww_mutex_die(lock, cur, ww_ctx);
538 __ww_waiter_add(lock, waiter, pos);
545 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
553 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
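Lines 490-553 keep the wait list stamp-ordered, oldest first: scanning back from the tail, younger Wait-Die waiters encountered are woken to die (line 535), and the scan stops at the first older waiter (under Wait-Die the real code kills itself there instead, lines 520-523, since it would die anyway); after insertion, a Wound-Wait waiter older than the current owner wounds it at once (line 553). A condensed sketch of the position search, again with the hypothetical older() helper (self-kill and error paths elided):

/* Position search from __ww_mutex_add_waiter(), condensed. */
static void add_waiter_sketch(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
			      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;

	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {
		if (!cur->ww_ctx)
			continue;		/* skip plain mutex waiters */
		if (older(cur->ww_ctx, ww_ctx))
			break;			/* rest of the queue is older */
		pos = cur;			/* younger: we go in front of it */
		__ww_mutex_die(lock, cur, ww_ctx);	/* Wait-Die: wake it to die */
	}
	__ww_waiter_add(lock, waiter, pos);	/* pos == NULL: add at the tail */
}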
559 static inline void __ww_mutex_unlock(struct ww_mutex *lock)
561 if (lock->ctx) {
563 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
565 if (lock->ctx->acquired > 0)
566 lock->ctx->acquired--;
567 lock->ctx = NULL;
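For context, here is the caller-side protocol everything above implements: take any number of same-class ww_mutexes under one acquire context, and on -EDEADLK back off completely, sleep on the contended lock, then retry. A usage sketch with made-up names (demo_class, lock_pair); the API calls themselves are the standard ones from <linux/ww_mutex.h>:

#include <linux/kernel.h>	/* swap() */
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_class);	/* hypothetical lock class */

static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_class);

	ret = ww_mutex_lock(a, &ctx);	/* first lock of a ctx cannot die */
	while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		/*
		 * An older transaction won: drop what we hold, sleep until
		 * the contended lock is free and take it, then retry with
		 * the acquisition order reversed.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
	}
	ww_acquire_done(&ctx);

	/* ... both locks held: do the multi-object work ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}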