Lines Matching refs: rq  (kernel/sched/rt.c, the real-time scheduling class)

177 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
179 return rt_rq->rq;
187 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
191 return rt_rq->rq;
220 struct rq *rq = cpu_rq(cpu);
224 rt_rq->rq = rq;
234 rt_se->rt_rq = &rq->rt;
292 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
294 return container_of(rt_rq, struct rq, rt);
297 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
306 struct rq *rq = rq_of_rt_se(rt_se);
308 return &rq->rt;
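
The two configurations above answer the same question, how to get from an rt_rq (or rt_se) back to its owning CPU runqueue, in different ways: with group scheduling the rt_rq carries an explicit rt_rq->rq back-pointer, while without it the rt_rq is embedded in struct rq and container_of() walks back to the parent. Below is a minimal userspace sketch of the embedded case; the struct layouts are simplified stand-ins, not the kernel's.

/*
 * Minimal userspace model of the container_of() accessor used when
 * struct rt_rq is embedded directly in struct rq (the non-group case).
 * The struct layouts are simplified stand-ins, not the kernel's.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq {
        unsigned int rt_nr_running;
};

struct rq {
        int cpu;
        struct rt_rq rt;        /* embedded, so container_of() can recover rq */
};

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
        struct rq rq = { .cpu = 3 };
        struct rt_rq *rt_rq = &rq.rt;

        /* Walking back from the embedded rt_rq yields the owning runqueue. */
        printf("cpu = %d\n", rq_of_rt_rq(rt_rq)->cpu);
        return 0;
}
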
323 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
325 /* Try to pull RT tasks here if we lower this rq's prio */
326 return rq->online && rq->rt.highest_prio.curr > prev->prio;
329 static inline int rt_overloaded(struct rq *rq)
331 return atomic_read(&rq->rd->rto_count);
334 static inline void rt_set_overload(struct rq *rq)
336 if (!rq->online)
339 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
350 atomic_inc(&rq->rd->rto_count);
353 static inline void rt_clear_overload(struct rq *rq)
355 if (!rq->online)
359 atomic_dec(&rq->rd->rto_count);
360 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
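
rt_set_overload()/rt_clear_overload() publish "this runqueue has more runnable RT tasks than it can execute" to the whole root domain: rto_mask records which CPUs are overloaded, and rto_count lets rt_overloaded() answer "is anyone overloaded?" with a single atomic read, so pull_rt_task() only walks the mask when the cheap counter says there is work to steal. A rough standalone model follows, assuming a plain bitmask and C11 atomics in place of cpumask_t and the kernel's atomic_t, and leaving out the rq->online guard seen above; the helpers here take the root domain and CPU directly rather than a struct rq.

/*
 * Rough userspace model of the rto_mask/rto_count pair: the mask records
 * which CPUs are overloaded, the counter makes "is anyone overloaded?"
 * a single atomic read. Types and signatures are simplified stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct root_domain {
        atomic_int rto_count;
        _Atomic unsigned long rto_mask;         /* one bit per CPU */
};

static bool rt_overloaded(struct root_domain *rd)
{
        return atomic_load(&rd->rto_count) != 0;
}

static void rt_set_overload(struct root_domain *rd, int cpu)
{
        atomic_fetch_or(&rd->rto_mask, 1UL << cpu);
        /*
         * The kernel publishes the mask bit before bumping the counter
         * (smp_wmb() between the two) so a reader that sees a non-zero
         * count also sees the bit; the sequentially consistent atomics
         * used here give the same guarantee.
         */
        atomic_fetch_add(&rd->rto_count, 1);
}

static void rt_clear_overload(struct root_domain *rd, int cpu)
{
        /* Mirror image: drop the count first, then clear the bit. */
        atomic_fetch_sub(&rd->rto_count, 1);
        atomic_fetch_and(&rd->rto_mask, ~(1UL << cpu));
}

int main(void)
{
        struct root_domain rd;

        atomic_init(&rd.rto_count, 0);
        atomic_init(&rd.rto_mask, 0);

        rt_set_overload(&rd, 2);
        printf("overloaded: %d\n", rt_overloaded(&rd));  /* 1 */
        rt_clear_overload(&rd, 2);
        printf("overloaded: %d\n", rt_overloaded(&rd));  /* 0 */
        return 0;
}
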
363 static inline int has_pushable_tasks(struct rq *rq)
365 return !plist_head_empty(&rq->rt.pushable_tasks);
371 static void push_rt_tasks(struct rq *);
372 static void pull_rt_task(struct rq *);
374 static inline void rt_queue_push_tasks(struct rq *rq)
376 if (!has_pushable_tasks(rq))
379 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
382 static inline void rt_queue_pull_task(struct rq *rq)
384 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
387 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
389 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
391 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
394 if (p->prio < rq->rt.highest_prio.next)
395 rq->rt.highest_prio.next = p->prio;
397 if (!rq->rt.overloaded) {
398 rt_set_overload(rq);
399 rq->rt.overloaded = 1;
403 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
405 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
408 if (has_pushable_tasks(rq)) {
409 p = plist_first_entry(&rq->rt.pushable_tasks,
411 rq->rt.highest_prio.next = p->prio;
413 rq->rt.highest_prio.next = MAX_RT_PRIO-1;
415 if (rq->rt.overloaded) {
416 rt_clear_overload(rq);
417 rq->rt.overloaded = 0;
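
enqueue_pushable_task()/dequeue_pushable_task() maintain the per-runqueue list of RT tasks that are allowed to run elsewhere: the list is kept priority-sorted, highest_prio.next caches the best pushable priority, and the overloaded state tracks whether the list is non-empty. The sketch below models that bookkeeping with an ordinary sorted singly linked list standing in for the kernel's plist; lower numeric prio means higher priority, as in the kernel, and all names are simplified stand-ins.

/*
 * Simplified model of the pushable-task bookkeeping: a priority-sorted
 * singly linked list stands in for the kernel's plist, highest_prio_next
 * caches the best pushable priority, and the overloaded flag says whether
 * the list is non-empty. Lower numeric prio means higher priority.
 */
#include <stdio.h>

#define MAX_RT_PRIO 100

struct task {
        int prio;
        struct task *next;
};

struct rt_rq_model {
        struct task *pushable;          /* sorted by ascending prio */
        int highest_prio_next;
        int overloaded;
};

static void enqueue_pushable(struct rt_rq_model *rt, struct task *p)
{
        struct task **pp = &rt->pushable;

        /* Keep the list ordered; equal priorities stay FIFO. */
        while (*pp && (*pp)->prio <= p->prio)
                pp = &(*pp)->next;
        p->next = *pp;
        *pp = p;

        if (p->prio < rt->highest_prio_next)
                rt->highest_prio_next = p->prio;
        if (!rt->overloaded)
                rt->overloaded = 1;
}

static void dequeue_pushable(struct rt_rq_model *rt, struct task *p)
{
        struct task **pp = &rt->pushable;

        while (*pp && *pp != p)
                pp = &(*pp)->next;
        if (*pp)
                *pp = p->next;

        if (rt->pushable) {
                rt->highest_prio_next = rt->pushable->prio;
        } else {
                /* No pushable tasks left: reset the cache, drop overload. */
                rt->highest_prio_next = MAX_RT_PRIO - 1;
                rt->overloaded = 0;
        }
}

int main(void)
{
        struct rt_rq_model rt = { NULL, MAX_RT_PRIO - 1, 0 };
        struct task a = { .prio = 50 }, b = { .prio = 10 };

        enqueue_pushable(&rt, &a);
        enqueue_pushable(&rt, &b);
        printf("next=%d overloaded=%d\n", rt.highest_prio_next, rt.overloaded);
        dequeue_pushable(&rt, &b);
        dequeue_pushable(&rt, &a);
        printf("next=%d overloaded=%d\n", rt.highest_prio_next, rt.overloaded);
        return 0;
}

The same overloaded flag set here is what push_rt_task() checks first (line 2007 below) before doing any work.
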
424 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
428 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
432 static inline void rt_queue_push_tasks(struct rq *rq)
514 #define for_each_rt_rq(rt_rq, iter, rq) \
517 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
533 struct rq *rq = rq_of_rt_rq(rt_rq);
536 int cpu = cpu_of(rq);
547 resched_curr(rq);
621 #define for_each_rt_rq(rt_rq, iter, rq) \
622 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
634 struct rq *rq = rq_of_rt_rq(rt_rq);
640 resched_curr(rq);
704 * or __disable_runtime() below sets a specific rq to inf to
735 static void __disable_runtime(struct rq *rq)
737 struct root_domain *rd = rq->rd;
744 for_each_rt_rq(rt_rq, iter, rq) {
817 static void __enable_runtime(struct rq *rq)
828 for_each_rt_rq(rt_rq, iter, rq) {
878 struct rq *rq = rq_of_rt_rq(rt_rq);
883 * When span == cpu_online_mask, taking each rq->lock
894 rq_lock(rq, &rf);
895 update_rq_clock(rq);
916 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
917 rq_clock_cancel_skipupdate(rq);
932 rq_unlock(rq, &rf);
1000 static void update_curr_rt(struct rq *rq)
1002 struct task_struct *curr = rq->curr;
1009 delta_exec = update_curr_common(rq);
1025 resched_curr(rq);
1036 struct rq *rq = rq_of_rt_rq(rt_rq);
1038 BUG_ON(&rq->rt != rt_rq);
1043 BUG_ON(!rq->nr_running);
1045 sub_nr_running(rq, count);
1053 struct rq *rq = rq_of_rt_rq(rt_rq);
1055 BUG_ON(&rq->rt != rt_rq);
1064 add_nr_running(rq, rt_rq->rt_nr_running);
1069 cpufreq_update_util(rq, 0);
1077 struct rq *rq = rq_of_rt_rq(rt_rq);
1081 * Change rq's cpupri only if rt_rq is the top queue.
1083 if (&rq->rt != rt_rq)
1086 if (rq->online && prio < prev_prio)
1087 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1093 struct rq *rq = rq_of_rt_rq(rt_rq);
1097 * Change rq's cpupri only if rt_rq is the top queue.
1099 if (&rq->rt != rt_rq)
1102 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1103 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
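
The inc_rt_prio_smp()/dec_rt_prio_smp() fragments above touch cpupri only when rt_rq is the top-level &rq->rt and the runqueue is online. cpupri itself is the root-domain table that records what priority each CPU is currently serving, so wakeups and pushes can look for a CPU doing less important work. The model below illustrates only that contract with a flat per-CPU array and a linear scan; the kernel's names are kept but not their signatures, and the real cpupri keeps one CPU mask per priority level and returns a mask of candidates rather than a single CPU.

/*
 * Illustration of the cpupri contract: every CPU publishes the priority
 * of what it is currently serving, and a push/wakeup can ask for a CPU
 * doing less important work. Simplified stand-in, not the kernel API.
 */
#include <stdio.h>

#define NR_CPUS         4
#define PRIO_IDLE       1000    /* stand-in for CPUPRI_IDLE: beats any RT task */

static int cpu_prio[NR_CPUS];   /* lower number = more important work */

static void cpupri_set(int cpu, int prio)
{
        cpu_prio[cpu] = prio;
}

/* Return the CPU running the least important work, provided that work is
 * less important than @prio; -1 if no such CPU exists. */
static int cpupri_find(int prio)
{
        int cpu, best = -1, best_prio = prio;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_prio[cpu] > best_prio) {
                        best_prio = cpu_prio[cpu];
                        best = cpu;
                }
        }
        return best;
}

int main(void)
{
        cpupri_set(0, 10);
        cpupri_set(1, 40);
        cpupri_set(2, PRIO_IDLE);
        cpupri_set(3, 25);

        /* A prio-30 task is best placed on the idle CPU 2. */
        printf("target cpu: %d\n", cpupri_find(30));
        return 0;
}
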
1448 struct rq *rq = rq_of_rt_se(rt_se);
1455 enqueue_top_rt_rq(&rq->rt);
1460 struct rq *rq = rq_of_rt_se(rt_se);
1472 enqueue_top_rt_rq(&rq->rt);
1479 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1491 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1492 enqueue_pushable_task(rq, p);
1495 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1499 update_curr_rt(rq);
1502 dequeue_pushable_task(rq, p);
1523 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1534 static void yield_task_rt(struct rq *rq)
1536 requeue_task_rt(rq, rq->curr, 0);
1546 struct rq *rq;
1553 rq = cpu_rq(cpu);
1556 curr = READ_ONCE(rq->curr); /* unlocked access */
1614 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1620 if (rq->curr->nr_cpus_allowed == 1 ||
1621 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1629 cpupri_find(&rq->rd->cpupri, p, NULL))
1637 requeue_task_rt(rq, p, 1);
1638 resched_curr(rq);
1641 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1643 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1650 rq_unpin_lock(rq, rf);
1651 pull_rt_task(rq);
1652 rq_repin_lock(rq, rf);
1655 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1662 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1664 if (p->prio < rq->curr->prio) {
1665 resched_curr(rq);
1682 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1683 check_preempt_equal_prio(rq, p);
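
wakeup_preempt_rt() preempts the running task only for a strictly higher-priority waker; on an equal-priority tie it does not preempt, it lets check_preempt_equal_prio() try to move one of the two tasks to another CPU instead. A small decision-only sketch of that rule, with illustrative names:

/*
 * Decision-only sketch of the wakeup rule above: a strictly higher
 * priority (lower number) waker preempts at once; an equal-priority
 * waker never preempts, it may only trigger an attempt to move one of
 * the two tasks to another CPU. Names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum action { NOTHING, RESCHED, TRY_SIDEWAYS_MIGRATION };

static enum action wakeup_preempt_decision(int waking_prio, int curr_prio,
                                           bool curr_need_resched)
{
        if (waking_prio < curr_prio)
                return RESCHED;
        if (waking_prio == curr_prio && !curr_need_resched)
                return TRY_SIDEWAYS_MIGRATION;
        return NOTHING;
}

int main(void)
{
        printf("%d\n", wakeup_preempt_decision(10, 20, false)); /* 1: RESCHED */
        printf("%d\n", wakeup_preempt_decision(20, 20, false)); /* 2: migrate? */
        printf("%d\n", wakeup_preempt_decision(30, 20, false)); /* 0: nothing */
        return 0;
}
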
1687 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1690 struct rt_rq *rt_rq = &rq->rt;
1692 p->se.exec_start = rq_clock_task(rq);
1697 dequeue_pushable_task(rq, p);
1707 if (rq->curr->sched_class != &rt_sched_class)
1708 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1710 rt_queue_push_tasks(rq);
1731 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1734 struct rt_rq *rt_rq = &rq->rt;
1746 static struct task_struct *pick_task_rt(struct rq *rq)
1750 if (!sched_rt_runnable(rq))
1753 p = _pick_next_task_rt(rq);
1758 static struct task_struct *pick_next_task_rt(struct rq *rq)
1760 struct task_struct *p = pick_task_rt(rq);
1763 set_next_task_rt(rq, p, true);
1768 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1771 struct rt_rq *rt_rq = &rq->rt;
1776 update_curr_rt(rq);
1778 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1785 enqueue_pushable_task(rq, p);
1793 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1795 if (!task_on_cpu(rq, p) &&
1803 * Return the highest pushable rq's task, which is suitable to be executed
1806 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1808 struct plist_head *head = &rq->rt.pushable_tasks;
1811 if (!has_pushable_tasks(rq))
1815 if (pick_rt_task(rq, p, cpu))
1915 /* Will lock the rq it finds */
1916 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1918 struct rq *lowest_rq = NULL;
1925 if ((cpu == -1) || (cpu == rq->cpu))
1932 * Target rq has tasks of equal or higher priority,
1941 if (double_lock_balance(rq, lowest_rq)) {
1946 * Also make sure that it wasn't scheduled on its rq.
1951 if (unlikely(task_rq(task) != rq ||
1953 task_on_cpu(rq, task) ||
1958 double_unlock_balance(rq, lowest_rq);
1964 /* If this rq is still suitable use it. */
1969 double_unlock_balance(rq, lowest_rq);
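
find_lock_lowest_rq() shows the standard pattern for taking two runqueue locks: acquire them in a global order (which may mean dropping the lock already held), then revalidate every assumption made before the unlock, because the task may have migrated, changed priority, or started running in the meantime. Below is a userspace sketch of that pattern using pthread mutexes; unlike the kernel it always drops and retakes the first lock instead of attempting a trylock first, and the struct and function names are illustrative, not the kernel's.

/*
 * Userspace sketch of the "lock both runqueues, then revalidate" pattern
 * behind find_lock_lowest_rq(). Names are illustrative, not the kernel's.
 */
#include <pthread.h>
#include <stdio.h>

struct cpu_rq {
        pthread_mutex_t lock;
        int cpu;
        int highest_prio;       /* best (lowest numeric) prio queued here */
};

struct task {
        int prio;
        struct cpu_rq *rq;      /* runqueue the task currently sits on */
};

/* Take two runqueue locks in a global (cpu id) order to avoid deadlock. */
static void double_lock(struct cpu_rq *a, struct cpu_rq *b)
{
        if (a->cpu < b->cpu) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

static void double_unlock(struct cpu_rq *a, struct cpu_rq *b)
{
        pthread_mutex_unlock(&a->lock);
        pthread_mutex_unlock(&b->lock);
}

/*
 * Try to lock a destination runqueue for @task while already holding
 * @src->lock. Taking both locks in order means dropping @src's lock
 * first, so the world may have changed: revalidate before migrating.
 * On failure only @src->lock is left held, as in the kernel.
 */
static struct cpu_rq *find_lock_dst(struct task *task, struct cpu_rq *src,
                                    struct cpu_rq *dst)
{
        pthread_mutex_unlock(&src->lock);
        double_lock(src, dst);

        if (task->rq != src || task->prio >= dst->highest_prio) {
                pthread_mutex_unlock(&dst->lock);
                return NULL;
        }
        return dst;
}

int main(void)
{
        struct cpu_rq src = { .cpu = 0, .highest_prio = 10 };
        struct cpu_rq dst = { .cpu = 1, .highest_prio = 90 };
        struct task t = { .prio = 40, .rq = &src };
        struct cpu_rq *target;

        pthread_mutex_init(&src.lock, NULL);
        pthread_mutex_init(&dst.lock, NULL);

        pthread_mutex_lock(&src.lock);          /* caller holds the source lock */
        target = find_lock_dst(&t, &src, &dst);
        printf("push target: %s\n", target ? "dst" : "none");

        if (target)
                double_unlock(&src, &dst);
        else
                pthread_mutex_unlock(&src.lock);
        return 0;
}
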
1976 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1980 if (!has_pushable_tasks(rq))
1983 p = plist_first_entry(&rq->rt.pushable_tasks,
1986 BUG_ON(rq->cpu != task_cpu(p));
1987 BUG_ON(task_current(rq, p));
2001 static int push_rt_task(struct rq *rq, bool pull)
2004 struct rq *lowest_rq;
2007 if (!rq->rt.overloaded)
2010 next_task = pick_next_pushable_task(rq);
2020 if (unlikely(next_task->prio < rq->curr->prio)) {
2021 resched_curr(rq);
2029 if (!pull || rq->push_busy)
2041 if (rq->curr->sched_class != &rt_sched_class)
2044 cpu = find_lowest_rq(rq->curr);
2045 if (cpu == -1 || cpu == rq->cpu)
2054 push_task = get_push_task(rq);
2057 raw_spin_rq_unlock(rq);
2058 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2059 push_task, &rq->push_work);
2061 raw_spin_rq_lock(rq);
2067 if (WARN_ON(next_task == rq->curr))
2070 /* We might release rq lock */
2073 /* find_lock_lowest_rq locks the rq if found */
2074 lowest_rq = find_lock_lowest_rq(next_task, rq);
2078 * find_lock_lowest_rq releases rq->lock
2085 task = pick_next_pushable_task(rq);
2108 deactivate_task(rq, next_task, 0);
2114 double_unlock_balance(rq, lowest_rq);
2121 static void push_rt_tasks(struct rq *rq)
2124 while (push_rt_task(rq, false))
2228 static void tell_cpu_to_push(struct rq *rq)
2233 atomic_inc(&rq->rd->rto_loop_next);
2236 if (!rto_start_trylock(&rq->rd->rto_loop_start))
2239 raw_spin_lock(&rq->rd->rto_lock);
2247 if (rq->rd->rto_cpu < 0)
2248 cpu = rto_next_cpu(rq->rd);
2250 raw_spin_unlock(&rq->rd->rto_lock);
2252 rto_start_unlock(&rq->rd->rto_loop_start);
2256 sched_get_rd(rq->rd);
2257 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2266 struct rq *rq;
2269 rq = this_rq();
2275 if (has_pushable_tasks(rq)) {
2276 raw_spin_rq_lock(rq);
2277 while (push_rt_task(rq, true))
2279 raw_spin_rq_unlock(rq);
2299 static void pull_rt_task(struct rq *this_rq)
2304 struct rq *src_rq;
2355 * on its rq, and no others.
2414 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2416 bool need_to_push = !task_on_cpu(rq, p) &&
2417 !test_tsk_need_resched(rq->curr) &&
2419 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2420 (rq->curr->nr_cpus_allowed < 2 ||
2421 rq->curr->prio <= p->prio);
2424 push_rt_tasks(rq);
2427 /* Assumes rq->lock is held */
2428 static void rq_online_rt(struct rq *rq)
2430 if (rq->rt.overloaded)
2431 rt_set_overload(rq);
2433 __enable_runtime(rq);
2435 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2438 /* Assumes rq->lock is held */
2439 static void rq_offline_rt(struct rq *rq)
2441 if (rq->rt.overloaded)
2442 rt_clear_overload(rq);
2444 __disable_runtime(rq);
2446 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2453 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2462 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2465 rt_queue_pull_task(rq);
2484 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2490 if (task_current(rq, p)) {
2491 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2502 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2503 rt_queue_push_tasks(rq);
2505 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2506 resched_curr(rq);
2515 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2520 if (task_current(rq, p)) {
2527 rt_queue_pull_task(rq);
2533 if (p->prio > rq->rt.highest_prio.curr)
2534 resched_curr(rq);
2538 resched_curr(rq);
2546 if (p->prio < rq->curr->prio)
2547 resched_curr(rq);
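
prio_changed_rt() reacts differently depending on whether the task whose priority changed is the one currently running: a deprioritized current task triggers a pull (and a resched if something better is already queued), while a boosted queued task only needs a resched when it now beats the running task. The SMP branch is reduced below to a pure decision function; the names and flag encoding are illustrative, not kernel API.

/*
 * The SMP branch of the priority-change handling above, reduced to a pure
 * decision function. "Pull" means this CPU may now be running something
 * too unimportant and should look for higher-priority RT work elsewhere.
 */
#include <stdbool.h>
#include <stdio.h>

#define PC_PULL         0x1
#define PC_RESCHED      0x2

static int prio_changed_decision(bool is_current, int oldprio, int newprio,
                                 int rq_highest_prio, int curr_prio)
{
        int action = 0;

        if (is_current) {
                if (oldprio < newprio)          /* priority was lowered */
                        action |= PC_PULL;
                if (newprio > rq_highest_prio)  /* a queued task is now better */
                        action |= PC_RESCHED;
        } else {
                if (newprio < curr_prio)        /* queued task now beats current */
                        action |= PC_RESCHED;
        }
        return action;
}

int main(void)
{
        /* Running task deprioritized 10 -> 40 while a prio-20 task waits. */
        printf("%#x\n", prio_changed_decision(true, 10, 40, 20, 40));   /* 0x3 */
        /* A queued task boosted to 5 while the current task runs at 30. */
        printf("%#x\n", prio_changed_decision(false, 50, 5, 5, 30));    /* 0x2 */
        return 0;
}
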
2552 static void watchdog(struct rq *rq, struct task_struct *p)
2576 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2584 * and everything must be accessed through the @rq and @curr passed in
2587 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2591 update_curr_rt(rq);
2592 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2594 watchdog(rq, p);
2614 requeue_task_rt(rq, p, 0);
2615 resched_curr(rq);
2621 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
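
task_tick_rt() (together with get_rr_interval_rt()) implements the SCHED_RR round-robin: each tick burns the running task's time slice, and when it reaches zero the slice is refilled and the task is requeued behind its equal-priority peers, but only if such peers exist; SCHED_FIFO tasks are never rotated. A simplified stateful model follows, with RR_TIMESLICE standing in for sched_rr_timeslice and the peer check passed in as a flag.

/*
 * Stateful model of the SCHED_RR handling in task_tick_rt(): every tick
 * burns the running task's slice; when it hits zero the slice is refilled
 * and the task is rotated behind its equal-priority peers, but only if
 * such peers exist. SCHED_FIFO tasks are never rotated.
 */
#include <stdbool.h>
#include <stdio.h>

#define RR_TIMESLICE 100        /* ticks; stand-in for sched_rr_timeslice */

struct rr_task {
        bool policy_rr;         /* true for SCHED_RR, false for SCHED_FIFO */
        int time_slice;
};

/* Returns true when the task should be requeued behind its peers. */
static bool rr_tick(struct rr_task *p, bool has_same_prio_peer)
{
        if (!p->policy_rr)
                return false;           /* FIFO: run until it blocks or yields */
        if (--p->time_slice)
                return false;           /* slice not exhausted yet */

        p->time_slice = RR_TIMESLICE;   /* refill for the next round */
        return has_same_prio_peer;      /* rotate only if someone is waiting */
}

int main(void)
{
        struct rr_task p = { .policy_rr = true, .time_slice = 2 };

        printf("%d\n", rr_tick(&p, true));      /* 0: slice not yet used up */
        printf("%d\n", rr_tick(&p, true));      /* 1: exhausted, peer waiting */
        printf("slice refilled to %d\n", p.time_slice);
        return 0;
}
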