Lines Matching refs:waiter

35 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
53 struct rt_mutex_waiter *waiter,
73 * NULL 1 lock is free and has waiters and the top waiter
183 * With the check for the waiter bit in place, T3 on CPU2 will not
271 * If a new waiter comes in between the unlock and the cmpxchg
289 * wake waiter();
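
The race above is why bit 0 of lock->owner doubles as a "has waiters" flag: a waiter sets it under wait_lock before sleeping, which makes the fast-path unlock cmpxchg fail and forces the owner into the slow path, where it can wake the top waiter. A minimal userspace sketch of that scheme, using C11 atomics and hypothetical names (the real code lives in rt_mutex_set_owner() and friends):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

struct lock { _Atomic uintptr_t owner; };

/* Fast-path unlock: succeeds only while no waiter has set bit 0. */
static bool fast_unlock(struct lock *l, uintptr_t me)
{
        uintptr_t old = me;             /* owner pointer, bit 0 clear */
        return atomic_compare_exchange_strong(&l->owner, &old, 0);
}

/* Called under wait_lock by a newly arriving waiter; from here on the
 * owner cannot release the lock without seeing the waiter. */
static void mark_waiters(struct lock *l)
{
        atomic_fetch_or(&l->owner, HAS_WAITERS);
}
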
357 * Update the waiter->tree copy of the sort keys.
360 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
362 lockdep_assert_held(&waiter->lock->wait_lock);
363 lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
365 waiter->tree.prio = __waiter_prio(task);
366 waiter->tree.deadline = task->dl.deadline;
370 * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
373 waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
375 lockdep_assert_held(&waiter->lock->wait_lock);
377 lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));
379 waiter->pi_tree.prio = waiter->tree.prio;
380 waiter->pi_tree.deadline = waiter->tree.deadline;
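
waiter_update_prio() and waiter_clone_prio() maintain two copies of the same sort keys because the waiter sits in two trees under two different locks: ->tree orders lock->waiters under lock->wait_lock, while ->pi_tree orders the owner's pi_waiters under the owner's pi_lock. A standalone sketch of that arrangement, with the surrounding types simplified:

#include <stdint.h>

struct waiter_node { int prio; uint64_t deadline; };

struct waiter {
        struct waiter_node tree;        /* key in lock->waiters     */
        struct waiter_node pi_tree;     /* key in owner->pi_waiters */
};

/* Refresh the wait-tree copy from the blocked task's current state;
 * only valid under the lock's wait_lock. */
static void update_prio(struct waiter *w, int task_prio, uint64_t deadline)
{
        w->tree.prio = task_prio;
        w->tree.deadline = deadline;
}

/* Propagate the wait-tree copy into the pi-tree copy. Done while both
 * wait_lock and the owner's pi_lock are held, so neither tree ever
 * observes a half-updated key. */
static void clone_prio(struct waiter *w)
{
        w->pi_tree.prio = w->tree.prio;
        w->pi_tree.deadline = w->tree.deadline;
}
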
400 * If left waiter has a dl_prio(), and we didn't return 1 above,
401 * then right waiter has a dl_prio() too.
418 * If left waiter has a dl_prio(), and we didn't return 0 above,
419 * then right waiter has a dl_prio() too.
427 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
430 if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
438 if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio))
441 return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
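
The comparisons above implement a two-level key: fixed priority first, absolute deadline as the tie-breaker for deadline-class tasks (if the left node is DL and did not win on prio, the right node must be DL too, since DL priorities sort below everything else). A runnable approximation, with the kernel's internal prio numbering (negative = DL, 0-99 = RT) taken as an assumption:

#include <stdbool.h>
#include <stdint.h>

struct waiter_node { int prio; uint64_t deadline; };

static bool dl_prio(int prio) { return prio < 0; }  /* MAX_DL_PRIO cutoff */

static bool node_less(const struct waiter_node *l, const struct waiter_node *r)
{
        if (l->prio < r->prio)
                return true;
        /* l is DL and didn't win on prio => r is DL as well; the
         * earlier absolute deadline wins (dl_time_before() upstream). */
        if (dl_prio(l->prio))
                return l->deadline < r->deadline;
        return false;
}

/* rt_mutex_steal() shape (line 427ff): strictly "less" always steals;
 * equal keys may steal only for non-RT/DL waiters, so RT latency stays
 * bounded. Lateral steals exist only in the spinlock build. */
static bool can_steal(const struct waiter_node *w, const struct waiter_node *top)
{
        if (node_less(w, top))
                return true;
        if (w->prio < 100)              /* RT or DL: no lateral steal */
                return false;
        return w->prio == top->prio;    /* DL would also compare deadlines */
}
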
464 /* NOTE: relies on waiter->ww_ctx being set before insertion */
477 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
481 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
485 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
489 if (RB_EMPTY_NODE(&waiter->tree.entry))
492 rb_erase_cached(&waiter->tree.entry, &lock->waiters);
493 RB_CLEAR_NODE(&waiter->tree.entry);
505 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
509 rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
513 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
517 if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
520 rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
521 RB_CLEAR_NODE(&waiter->pi_tree.entry);
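
Both dequeue helpers bail out when RB_EMPTY_NODE() says the waiter is not queued, and re-mark the node with RB_CLEAR_NODE() after erasing it, making dequeue idempotent. The same pattern, modeled standalone with a sorted list standing in for the kernel's leftmost-cached rbtree:

#include <stdbool.h>

struct node { struct node *next; bool queued; int key; };
struct tree { struct node *head; };     /* head == cached top waiter */

static void enqueue(struct tree *t, struct node *n)
{
        struct node **p = &t->head;

        while (*p && (*p)->key <= n->key)   /* keep sorted by key */
                p = &(*p)->next;
        n->next = *p;
        *p = n;
        n->queued = true;
}

/* Idempotent: the RB_EMPTY_NODE()/RB_CLEAR_NODE() analogue makes a
 * second dequeue of the same waiter a harmless no-op, which the
 * cleanup paths rely on. */
static void dequeue(struct tree *t, struct node *n)
{
        struct node **p;

        if (!n->queued)                 /* RB_EMPTY_NODE() check */
                return;
        for (p = &t->head; *p != n; p = &(*p)->next)
                ;
        *p = n->next;
        n->queued = false;              /* RB_CLEAR_NODE()       */
}
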
584 * If the waiter argument is NULL, this indicates the deboost path and
589 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
593 return waiter != NULL;
617 * depicted above or if the top waiter has gone away and we are
619 * @top_task: the current top waiter
649 * [2] waiter = task->pi_blocked_on; [P1]
651 * [4] lock = waiter->lock; [P1]
657 * [7] requeue_lock_waiter(lock, waiter); [P1] + [L]
682 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
732 * [2] Get the waiter on which @task is blocked.
734 waiter = task->pi_blocked_on;
745 if (!waiter)
764 if (next_lock != waiter->lock)
789 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
802 * are not the top pi waiter of the task. If deadlock
815 * If the waiter priority is the same as the task priority
821 if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
832 lock = waiter->lock;
910 * Get the top waiter for the next iteration
925 * Store the current top waiter before doing the requeue
931 /* [7] Requeue the waiter in the lock waiter tree. */
932 rt_mutex_dequeue(lock, waiter);
935 * Update the waiter prio fields now that we're dequeued.
945 waiter_update_prio(waiter, task);
947 rt_mutex_enqueue(lock, waiter);
968 * If the requeue [7] above changed the top waiter,
969 * then we need to wake the new top waiter up to try
989 if (waiter == rt_mutex_top_waiter(lock)) {
991 * The waiter became the new top (highest priority)
992 * waiter on the lock. Replace the previous top waiter
993 * in the owner task's pi waiters tree with this waiter
997 waiter_clone_prio(waiter, task);
998 rt_mutex_enqueue_pi(task, waiter);
1001 } else if (prerequeue_top_waiter == waiter) {
1003 * The waiter was the top waiter on the lock, but is
1004 * no longer the top priority waiter. Replace waiter in
1006 * (highest priority) waiter and adjust the priority
1008 * The new top waiter is stored in @waiter so that
1009 * @waiter == @top_waiter evaluates to true below and
1012 rt_mutex_dequeue_pi(task, waiter);
1013 waiter = rt_mutex_top_waiter(lock);
1014 waiter_clone_prio(waiter, task);
1015 rt_mutex_enqueue_pi(task, waiter);
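
Steps [7] and the pi-tree fixup above are the heart of one chain-walk iteration: re-sort the waiter in the lock's wait tree with its new key, then patch the owner's pi_waiters tree only when the identity of the lock's top waiter changed. A condensed sketch of that decision; the helpers are prototypes standing in for the tree operations sketched earlier:

struct tree; struct node;

void enqueue(struct tree *, struct node *);
void dequeue(struct tree *, struct node *);
struct node *top_of(struct tree *);
void update_key(struct node *, int new_prio);

static void requeue_step(struct tree *lock_waiters, struct tree *owner_pi,
                         struct node *waiter, struct node *prerequeue_top,
                         int new_prio)
{
        /* [7] re-sort the waiter with its boosted/deboosted key */
        dequeue(lock_waiters, waiter);
        update_key(waiter, new_prio);
        enqueue(lock_waiters, waiter);

        if (waiter == top_of(lock_waiters)) {
                /* Became the new top: replace the previous top in the
                 * owner's pi_waiters tree (lines 989-998). */
                dequeue(owner_pi, prerequeue_top);
                enqueue(owner_pi, waiter);
        } else if (prerequeue_top == waiter) {
                /* Was the top but no longer is: put whoever is the new
                 * top into owner's pi_waiters instead (lines 1001-1015). */
                dequeue(owner_pi, waiter);
                enqueue(owner_pi, top_of(lock_waiters));
        }
        /* else: top waiter unchanged, nothing to fix up */
}
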
1036 * Store the top waiter of @lock for the end of chain walk
1056 * If the current waiter is not the top waiter on the lock,
1060 if (!detect_deadlock && waiter != top_waiter)
1080 * @waiter: The waiter that is queued to the lock's wait tree if the
1085 struct rt_mutex_waiter *waiter)
1115 * If @waiter != NULL, @task has already enqueued the waiter
1116 * into @lock's waiter tree. If @waiter == NULL then this is a
1119 if (waiter) {
1123 * If waiter is the highest priority waiter of @lock,
1126 if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
1128 * We can acquire the lock. Remove the waiter from the
1131 rt_mutex_dequeue(lock, waiter);
1151 * The current top waiter stays enqueued. We
1168 * @task->pi_lock. Redundant operation for the @waiter == NULL
1177 * waiter into @task->pi_waiters tree.
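
Condensed, try_to_take_rt_mutex() (lines 1080-1177) succeeds in exactly three situations: the caller is the queued top waiter, it can steal from the top waiter, or the lock has no waiters at all. A decision-only sketch under those assumptions, with the key comparison stubbed out:

#include <stdbool.h>

struct key { int prio; };               /* simplified sort key */

bool key_beats(const struct key *a, const struct key *b);  /* steal test */

/* @queued: the task's own waiter key if already in the wait tree (NULL
 * for a trylock), @task: the task's key, @top: the current top
 * waiter's key (NULL if the lock is uncontended). */
static bool may_take_lock(const struct key *queued, const struct key *task,
                          const struct key *top)
{
        if (!top)
                return true;            /* no waiters: just take it */
        if (queued)
                /* Already enqueued: win as the top waiter itself or
                 * by out-prioritizing it (lines 1119-1131). */
                return queued == top || key_beats(queued, top);
        /* Trylock from outside the wait tree: only an outright steal
         * works; otherwise the queued top waiter keeps its claim
         * (line 1151). */
        return key_beats(task, top);
}

On success the caller dequeues its own waiter and, if waiters remain, enqueues the new top waiter into its own pi_waiters tree (lines 1168-1177), since it is now the task everyone else waits behind.
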
1196 * Prepare waiter and propagate pi chain
1201 struct rt_mutex_waiter *waiter,
1207 struct rt_mutex_waiter *top_waiter = waiter;
1216 * only an optimization. We drop the locks, so another waiter
1219 * which is wrong, as the other waiter is not in a deadlock
1229 waiter->task = task;
1230 waiter->lock = lock;
1231 waiter_update_prio(waiter, task);
1232 waiter_clone_prio(waiter, task);
1234 /* Get the top priority waiter on the lock */
1237 rt_mutex_enqueue(lock, waiter);
1239 task->pi_blocked_on = waiter;
1246 /* Check whether the waiter should back out immediately */
1248 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
1251 rt_mutex_dequeue(lock, waiter);
1262 if (waiter == rt_mutex_top_waiter(lock)) {
1264 rt_mutex_enqueue_pi(owner, waiter);
1269 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1295 next_lock, waiter, task);
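
task_blocks_on_rt_mutex() (lines 1196-1295) prepares the waiter and decides whether a chain walk is needed: fill in task and lock, compute both key copies, enqueue into the wait tree, publish task->pi_blocked_on, and touch the owner only when the new waiter just became the top waiter (boost) or deadlock detection demands a full walk. An ordered sketch with stand-in helpers:

#include <stdbool.h>

struct task; struct lock; struct waiter;

void set_keys(struct waiter *, struct task *);        /* update + clone */
void enqueue_wait_tree(struct lock *, struct waiter *);
void requeue_owner_pi_tree(struct task *owner, struct waiter *);
bool is_top(struct lock *, struct waiter *);
void walk_pi_chain(struct task *owner);

static void block_on(struct lock *l, struct waiter *w, struct task *task,
                     struct task *owner, bool detect_deadlock)
{
        /* All under lock->wait_lock; pi_blocked_on additionally under
         * task->pi_lock in the kernel. */
        set_keys(w, task);                      /* lines 1231-1232 */
        enqueue_wait_tree(l, w);                /* line 1237       */
        /* task->pi_blocked_on = w;                line 1239       */

        if (is_top(l, w))
                /* Replaces the old top in owner's pi_waiters, which
                 * may boost the owner (lines 1262-1264). */
                requeue_owner_pi_tree(owner, w);
        else if (!detect_deadlock)
                return;                         /* no walk needed  */

        walk_pi_chain(owner);                   /* line 1295       */
}
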
1303 * Remove the top waiter from the current task's pi waiter tree and
1311 struct rt_mutex_waiter *waiter;
1317 waiter = rt_mutex_top_waiter(lock);
1326 rt_mutex_dequeue_pi(current, waiter);
1330 * As we are waking up the top waiter, and the waiter stays
1335 * the top waiter can steal this lock.
1340 * We deboosted before waking the top waiter task such that we don't
1350 rt_mutex_wake_q_add(wqh, waiter);
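
The unlock side (lines 1303-1350) mirrors this: the owner removes the top waiter from its own pi_waiters tree, deboosts itself before the wakeup, and leaves the waiter queued in the lock's wait tree, so the woken task must still win try_to_take_rt_mutex() itself (and can still be robbed by a higher-priority task). Sketch of the ordering, with stand-in helpers:

struct lock; struct waiter; struct wake_q;

struct waiter *top_waiter(struct lock *);
void dequeue_owner_pi_tree(struct waiter *);
void deboost_current(void);
void wake_q_add(struct wake_q *, struct waiter *);

/* Shape of mark_wakeup_next_waiter(); runs under lock->wait_lock. */
static void wake_next(struct lock *l, struct wake_q *wqh)
{
        struct waiter *w = top_waiter(l);       /* line 1317 */

        /* Deboost before waking (preemption stays disabled until the
         * wakeup is done) so two tasks never run side by side at the
         * boosted priority (lines 1326, 1340). */
        dequeue_owner_pi_tree(w);
        deboost_current();

        /* w stays in the wait tree; the actual wakeup happens after
         * wait_lock is dropped (line 1350). */
        wake_q_add(wqh, w);
}
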
1457 * The wakeup next waiter path does not suffer from the above
1460 * Queue the next waiter for wakeup once we release the wait_lock.
1478 struct rt_mutex_waiter *waiter,
1498 * - current is no longer the top waiter
1504 !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
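
rtmutex_spin_on_owner() (lines 1478-1504) keeps a would-be owner spinning only while spinning can pay off. The bail-out conditions listed above translate to a loop like this userspace approximation with C11 atomics (the kernel additionally checks need_resched() and vcpu_is_preempted(), all under RCU):

#include <stdatomic.h>
#include <stdbool.h>

struct task { _Atomic bool on_cpu; };
struct lock {
        _Atomic(struct task *) owner;
        _Atomic(void *) top_waiter;
};

/* Returns true to retry the trylock (owner changed/released), false
 * when blocking is the better strategy. */
static bool spin_on_owner(struct lock *l, void *me, struct task *owner)
{
        for (;;) {
                if (atomic_load(&l->owner) != owner)
                        return true;    /* owner changed: trylock again */
                if (!atomic_load(&owner->on_cpu))
                        return false;   /* owner preempted: go to sleep */
                if (atomic_load(&l->top_waiter) != me)
                        return false;   /* no longer the top waiter     */
                /* cpu_relax() here in the kernel */
        }
}
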
1515 struct rt_mutex_waiter *waiter,
1530 * Remove a waiter from a lock and give up
1536 struct rt_mutex_waiter *waiter)
1538 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1545 rt_mutex_dequeue(lock, waiter);
1550 * Only update priority if the waiter was the highest priority
1551 * waiter of the lock and there is an owner to update.
1558 rt_mutex_dequeue_pi(owner, waiter);
1595 * @waiter: the pre-initialized rt_mutex_waiter
1603 struct rt_mutex_waiter *waiter)
1611 if (try_to_take_rt_mutex(lock, current, waiter))
1624 ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
1629 if (waiter == rt_mutex_top_waiter(lock))
1635 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
1675 * @waiter: Initialized waiter for blocking
1681 struct rt_mutex_waiter *waiter)
1702 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
1704 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
1715 remove_waiter(lock, waiter);
1716 rt_mutex_handle_deadlock(ret, chwalk, waiter);
1720 * try_to_take_rt_mutex() sets the waiter bit
1734 struct rt_mutex_waiter waiter;
1737 rt_mutex_init_waiter(&waiter);
1738 waiter.ww_ctx = ww_ctx;
1741 &waiter);
1743 debug_rt_mutex_free_waiter(&waiter);
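
The wrapper at lines 1734-1743 also documents the rt_mutex_waiter lifetime rule: it lives on the blocking task's own stack for exactly one slowlock call, initialized before and debug-checked after, with nothing allowed to hold a reference once the call returns. In miniature, with stand-in helpers:

struct waiter { int key; /* sort keys, tree linkage, ... */ };

void init_waiter(struct waiter *);
int slowlock(struct waiter *);          /* blocks; w sits in wait tree */
void free_waiter_debug_check(struct waiter *);

static int lock_slowpath(void)
{
        struct waiter w;                /* stack lifetime == one call  */
        int ret;

        init_waiter(&w);
        ret = slowlock(&w);
        free_waiter_debug_check(&w);    /* must be dequeued by now     */
        return ret;
}
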
1761 * Do all pre-schedule work here, before we queue a waiter and invoke
1764 * rtlock_slowlock() and will then enqueue a second waiter for this
1808 struct rt_mutex_waiter waiter;
1816 rt_mutex_init_rtlock_waiter(&waiter);
1823 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
1827 if (try_to_take_rt_mutex(lock, current, &waiter))
1830 if (&waiter == rt_mutex_top_waiter(lock))
1836 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
1847 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
1851 debug_rt_mutex_free_waiter(&waiter);
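
The rtlock (RT-spinlock) slowpath above is the sleeping-lock state machine minus signals and timeouts: block as a TASK_RTLOCK_WAIT waiter, then alternate between trylock, optimistic spinning while the owner runs, and schedule_rtlock(). Its retry loop (lines 1827-1836) reduces to a sketch like:

#include <stdbool.h>
#include <stddef.h>

struct lock; struct waiter; struct task;

bool try_to_take(struct lock *, struct waiter *);
struct task *owner_of(struct lock *);
bool is_top_waiter(struct lock *, struct waiter *);
bool spin_on_owner(struct lock *, struct waiter *, struct task *);
void block(void);                       /* schedule_rtlock() upstream */

static void rtlock_acquire_loop(struct lock *l, struct waiter *w)
{
        for (;;) {
                if (try_to_take(l, w))
                        return;         /* got it; waiter is dequeued */

                /* Spinning only pays off for the top waiter while the
                 * owner is on a CPU; everyone else really blocks. */
                struct task *owner = is_top_waiter(l, w) ? owner_of(l) : NULL;
                if (!owner || !spin_on_owner(l, w, owner))
                        block();
        }
}
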