Lines matching refs: rq (kernel/sched/deadline.c)

67 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
69 return container_of(dl_rq, struct rq, dl);
72 static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
74 struct rq *rq = dl_se->rq;
77 rq = task_rq(dl_task_of(dl_se));
79 return rq;
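
The two helpers above recover the enclosing runqueue either from an embedded dl_rq or from a dl_server entity's back-pointer. A minimal userspace sketch of the container_of() pattern the first one relies on; the _demo types and names are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel macro: step back from the member's address by
 * the member's offset to reach the start of the enclosing structure. */
#define container_of_demo(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq_demo { int dl_nr_running; };
struct rq_demo    { int cpu; struct dl_rq_demo dl; };

static struct rq_demo *rq_of_dl_rq_demo(struct dl_rq_demo *dl_rq)
{
        return container_of_demo(dl_rq, struct rq_demo, dl);
}

int main(void)
{
        struct rq_demo rq = { .cpu = 3 };

        /* Recovers &rq from a pointer to its embedded dl member. */
        printf("cpu=%d\n", rq_of_dl_rq_demo(&rq.dl)->cpu);
        return 0;
}
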
153 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
189 struct rq *rq = cpu_rq(i);
191 rq->dl.extra_bw += bw;
325 struct rq *rq;
332 rq = task_rq(p);
334 sub_running_bw(&p->dl, &rq->dl);
340 * will not touch the rq's active utilization,
346 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
347 __add_rq_bw(new_bw, &rq->dl);
354 * the rq active utilization (running_bw) when the task blocks.
409 struct rq *rq = rq_of_dl_se(dl_se);
410 struct dl_rq *dl_rq = &rq->dl;
433 zerolag_time -= rq_clock(rq);
452 sub_rq_bw(dl_se, &rq->dl);
490 * will not touch the rq's active utilization,
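
This region's bandwidth bookkeeping maintains two per-runqueue sums: this_bw for every deadline task attached to the rq and running_bw for the tasks still contending; a blocked task only leaves running_bw at its zero-lag time (what the zerolag_time computation above is for). A hedged stand-in for the {add,sub}_{running,rq}_bw() updates, with invented names and no locking:

#include <assert.h>
#include <stdint.h>

struct dl_rq_bw_demo {
        uint64_t running_bw;    /* bandwidth of tasks still contending */
        uint64_t this_bw;       /* bandwidth of all tasks attached to the rq */
};

/* Task joins the rq and is runnable: both sums grow. */
static void attach_task_bw(struct dl_rq_bw_demo *dl, uint64_t bw)
{
        dl->this_bw += bw;
        dl->running_bw += bw;
        assert(dl->running_bw <= dl->this_bw);
}

/* Zero-lag time of a blocked task: it stops contending, but its
 * reservation still counts against this rq. */
static void zero_lag_reached(struct dl_rq_bw_demo *dl, uint64_t bw)
{
        dl->running_bw -= bw;
}

/* Task leaves the rq for good (migration, class change, exit). */
static void detach_task_bw(struct dl_rq_bw_demo *dl, uint64_t bw, int contending)
{
        if (contending)
                dl->running_bw -= bw;
        dl->this_bw -= bw;
}

int main(void)
{
        struct dl_rq_bw_demo dl = { 0, 0 };

        attach_task_bw(&dl, 100);       /* task with bandwidth 100 arrives */
        zero_lag_reached(&dl, 100);     /* it blocked and hit its zero-lag time */
        detach_task_bw(&dl, 100, 0);    /* later it is moved away */
        assert(dl.running_bw == 0 && dl.this_bw == 0);
        return 0;
}
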
547 static inline int dl_overloaded(struct rq *rq)
549 return atomic_read(&rq->rd->dlo_count);
552 static inline void dl_set_overload(struct rq *rq)
554 if (!rq->online)
557 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
565 atomic_inc(&rq->rd->dlo_count);
568 static inline void dl_clear_overload(struct rq *rq)
570 if (!rq->online)
573 atomic_dec(&rq->rd->dlo_count);
574 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
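
dl_set_overload()/dl_clear_overload() publish, per root domain, which CPUs carry more pushable deadline work than they can run: a cpumask with one bit per overloaded CPU plus an atomic counter, so dl_overloaded() is a single read. A hedged sketch of that bookkeeping with invented _demo names (the mask update here is left non-atomic; the real code runs under the rq lock and orders the mask write before the counter):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct root_domain_demo {
        atomic_int    dlo_count;        /* number of overloaded CPUs */
        unsigned long dlo_mask;         /* one bit per overloaded CPU */
};

static void dl_set_overload_demo(struct root_domain_demo *rd, int cpu)
{
        rd->dlo_mask |= 1UL << cpu;
        /* Make the mask bit visible before the count, so a reader that
         * sees a non-zero count can trust the mask. */
        atomic_fetch_add_explicit(&rd->dlo_count, 1, memory_order_release);
}

static void dl_clear_overload_demo(struct root_domain_demo *rd, int cpu)
{
        atomic_fetch_sub_explicit(&rd->dlo_count, 1, memory_order_release);
        rd->dlo_mask &= ~(1UL << cpu);
}

static bool dl_overloaded_demo(struct root_domain_demo *rd)
{
        return atomic_load_explicit(&rd->dlo_count, memory_order_acquire) != 0;
}

int main(void)
{
        struct root_domain_demo rd = { 0, 0 };

        dl_set_overload_demo(&rd, 2);
        assert(dl_overloaded_demo(&rd));
        dl_clear_overload_demo(&rd, 2);
        assert(!dl_overloaded_demo(&rd));
        return 0;
}
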
585 static inline int has_pushable_dl_tasks(struct rq *rq)
587 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
594 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
601 &rq->dl.pushable_dl_tasks_root,
604 rq->dl.earliest_dl.next = p->dl.deadline;
606 if (!rq->dl.overloaded) {
607 dl_set_overload(rq);
608 rq->dl.overloaded = 1;
612 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
614 struct dl_rq *dl_rq = &rq->dl;
627 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
628 dl_clear_overload(rq);
629 rq->dl.overloaded = 0;
633 static int push_dl_task(struct rq *rq);
635 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
637 return rq->online && dl_task(prev);
643 static void push_dl_tasks(struct rq *);
644 static void pull_dl_task(struct rq *);
646 static inline void deadline_queue_push_tasks(struct rq *rq)
648 if (!has_pushable_dl_tasks(rq))
651 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
654 static inline void deadline_queue_pull_task(struct rq *rq)
656 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
659 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
661 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
663 struct rq *later_rq = NULL;
666 later_rq = find_lock_later_rq(p, rq);
671 * If we cannot preempt any rq, fall back to pick any
690 double_lock_balance(rq, later_rq);
696 * waiting for us to release rq locks). In any case, when it
700 sub_running_bw(&p->dl, &rq->dl);
701 sub_rq_bw(&p->dl, &rq->dl);
706 sub_rq_bw(&p->dl, &rq->dl);
715 dl_b = &rq->rd->dl_bw;
717 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
726 double_unlock_balance(later_rq, rq);
734 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
739 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
753 static inline void deadline_queue_push_tasks(struct rq *rq)
757 static inline void deadline_queue_pull_task(struct rq *rq)
764 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
766 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
769 struct rq *rq)
772 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
791 struct rq *rq = rq_of_dl_rq(dl_rq);
794 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
809 replenish_dl_new_period(dl_se, rq);
833 struct rq *rq = rq_of_dl_rq(dl_rq);
842 replenish_dl_new_period(dl_se, rq);
860 * the future" with respect to rq->clock. If it's
867 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
869 replenish_dl_new_period(dl_se, rq);
915 * and (deadline - t), since t is rq->clock, is the time left
950 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
952 u64 laxity = dl_se->deadline - rq_clock(rq);
960 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
1013 struct rq *rq = rq_of_dl_se(dl_se);
1015 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1016 dl_entity_overflow(dl_se, rq_clock(rq))) {
1019 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1021 update_dl_revised_wakeup(dl_se, rq);
1025 replenish_dl_new_period(dl_se, rq);
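
Around these lines the wakeup path decides whether a task may keep its old (runtime, deadline) pair or must get a fresh one: the old pair is reused only if the deadline is still in the future and the leftover runtime, packed into the time left, does not exceed the reserved density dl_runtime/dl_deadline. A flattened, division-free sketch of that rule with invented _demo names (the kernel also pre-shifts the operands to guard against multiplication overflow):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dl_entity_demo {
        int64_t  runtime;       /* remaining runtime, ns */
        uint64_t deadline;      /* absolute deadline, ns */
        uint64_t dl_runtime;    /* reserved runtime per period, ns */
        uint64_t dl_deadline;   /* relative deadline, ns */
};

/* runtime / (deadline - now) > dl_runtime / dl_deadline, cross-multiplied
 * so no division is needed; only meaningful while deadline > now. */
static bool dl_entity_overflow_demo(const struct dl_entity_demo *se, uint64_t now)
{
        return (uint64_t)se->runtime * se->dl_deadline >
               (se->deadline - now) * se->dl_runtime;
}

/* Wakeup rule: keep the old pair unless it has expired or would overflow,
 * otherwise start a new period (replenish_dl_new_period() in the listing). */
static void update_dl_entity_demo(struct dl_entity_demo *se, uint64_t now)
{
        if (se->deadline <= now || dl_entity_overflow_demo(se, now)) {
                se->deadline = now + se->dl_deadline;
                se->runtime  = (int64_t)se->dl_runtime;
        }
}

int main(void)
{
        /* 10 ms runtime every 100 ms, 8 ms of runtime left but only 40 ms
         * to the deadline: 8/40 > 10/100, so a fresh period is granted. */
        struct dl_entity_demo se = {
                .runtime = 8000000, .deadline = 140000000,
                .dl_runtime = 10000000, .dl_deadline = 100000000,
        };

        update_dl_entity_demo(&se, 100000000);
        printf("deadline=%llu runtime=%lld\n",
               (unsigned long long)se.deadline, (long long)se.runtime);
        return 0;
}
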
1048 struct rq *rq = rq_of_dl_rq(dl_rq);
1052 lockdep_assert_rq_held(rq);
1056 * that it is actually coming from rq->clock and not from
1061 delta = ktime_to_ns(now) - rq_clock(rq);
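
The fragment above compensates for the fact that the entity's deadlines are computed against rq_clock() while the replenishment hrtimer is armed in the timer's own clock domain; the expiry is shifted by the currently observed offset between the two. A small stand-alone rendition of that adjustment (all values in nanoseconds, names invented):

#include <stdint.h>
#include <stdio.h>

/* Translate an absolute expiry from the rq-clock domain into the timer's
 * clock domain by applying the observed offset, which is what the
 * "delta = ktime_to_ns(now) - rq_clock(rq)" line above computes. */
static uint64_t timer_expiry_demo(uint64_t expiry_rqclock,
                                  uint64_t now_timerclock, uint64_t now_rqclock)
{
        int64_t delta = (int64_t)(now_timerclock - now_rqclock);

        return expiry_rqclock + (uint64_t)delta;
}

int main(void)
{
        /* The rq clock lags the timer clock by 2 us here, so the expiry
         * moves 2 us later in the timer's domain. */
        printf("%llu\n", (unsigned long long)
               timer_expiry_demo(1000000, 502000, 500000));
        return 0;
}
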
1090 static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1094 * Queueing this task back might have overloaded rq, check if we need
1097 if (has_pushable_dl_tasks(rq)) {
1099 * Nothing relies on rq->lock after this, so it's safe to drop
1100 * rq->lock.
1102 rq_unpin_lock(rq, rf);
1103 push_dl_task(rq);
1104 rq_repin_lock(rq, rf);
1115 * (it is on its rq) or has been removed from there by a call to
1129 struct rq *rq;
1132 struct rq *rq = rq_of_dl_se(dl_se);
1135 rq_lock(rq, &rf);
1138 update_rq_clock(rq);
1142 resched_curr(rq);
1143 __push_dl_task(rq, &rf);
1149 rq_unlock(rq, &rf);
1155 rq = task_rq_lock(p, &rf);
1179 update_rq_clock(rq);
1201 if (unlikely(!rq->online)) {
1204 * task elsewhere. This necessarily changes rq.
1206 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1207 rq = dl_task_offline_migration(rq, p);
1208 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1209 update_rq_clock(rq);
1219 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1220 if (dl_task(rq->curr))
1221 wakeup_preempt_dl(rq, p, 0);
1223 resched_curr(rq);
1225 __push_dl_task(rq, &rf);
1228 task_rq_unlock(rq, p, &rf);
1267 struct rq *rq = rq_of_dl_se(dl_se);
1269 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1270 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1294 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1296 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1297 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1302 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1305 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1313 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1316 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
1318 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
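
The comments at lines 1294-1297 spell out the fixed-point conventions grub_reclaim() works in: utilizations carry 2^BW_SHIFT as "1.0" and bw_ratio holds 1/Umax scaled by 2^RATIO_SHIFT, so the consumed runtime is scaled by max{u, Umax - Uinact - Uextra} / Umax using shifts instead of divisions. A self-contained rendition of that arithmetic, using the kernel's 20/8-bit shifts but otherwise invented parameters and names:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT        20      /* utilizations: 2^20 == utilization 1.0 */
#define RATIO_SHIFT      8      /* bw_ratio: (1/Umax) << RATIO_SHIFT */

static uint64_t grub_reclaim_demo(uint64_t delta, uint64_t this_bw,
                                  uint64_t running_bw, uint64_t extra_bw,
                                  uint64_t max_bw, uint64_t bw_ratio,
                                  uint64_t dl_bw)
{
        uint64_t u_inact = this_bw - running_bw;        /* Utot - Uact */
        uint64_t u_act;

        /* Compare instead of subtracting first, so the expression below
         * can never go "negative" in unsigned arithmetic. */
        if (u_inact + extra_bw > max_bw - dl_bw)
                u_act = dl_bw;
        else
                u_act = max_bw - u_inact - extra_bw;

        u_act = (u_act * bw_ratio) >> RATIO_SHIFT;      /* divide by Umax */
        return (delta * u_act) >> BW_SHIFT;             /* scale the runtime */
}

int main(void)
{
        uint64_t one = 1ULL << BW_SHIFT;
        uint64_t umax = one * 95 / 100;                 /* Umax = 0.95 */
        uint64_t ratio = (one << RATIO_SHIFT) / umax;   /* 1/Umax, Q8 */

        /* Running task with u = 0.25, another 0.25 currently inactive,
         * 0.25 set aside as extra bandwidth: 1 ms of consumed runtime is
         * charged as roughly (0.95 - 0.25 - 0.25) / 0.95 of a millisecond. */
        printf("%llu\n", (unsigned long long)
               grub_reclaim_demo(1000000, one / 2, one / 4, one / 4,
                                 umax, ratio, one / 4));
        return 0;
}
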
1325 static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1346 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
1348 int cpu = cpu_of(rq);
1369 update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1370 dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1377 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1380 if (!is_leftmost(dl_se, &rq->dl))
1381 resched_curr(rq);
1396 struct rt_rq *rt_rq = &rq->rt;
1412 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1429 void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1433 dl_se->rq = rq;
1442 static void update_curr_dl(struct rq *rq)
1444 struct task_struct *curr = rq->curr;
1459 delta_exec = update_curr_common(rq);
1460 update_curr_dl_se(rq, dl_se, delta_exec);
1470 struct rq *rq;
1474 rq = task_rq_lock(p, &rf);
1476 rq = dl_se->rq;
1477 rq_lock(rq, &rf);
1481 update_rq_clock(rq);
1507 sub_running_bw(dl_se, &rq->dl);
1512 task_rq_unlock(rq, p, &rf);
1515 rq_unlock(rq, &rf);
1536 struct rq *rq = rq_of_dl_rq(dl_rq);
1541 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1543 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1549 struct rq *rq = rq_of_dl_rq(dl_rq);
1558 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1559 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1565 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1729 * its rq, the bandwidth timer callback (which clearly has not
1787 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1840 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1841 enqueue_pushable_dl_task(rq, p);
1844 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1846 update_curr_dl(rq);
1853 dequeue_pushable_dl_task(rq, p);
1866 static void yield_task_dl(struct rq *rq)
1874 rq->curr->dl.dl_yielded = 1;
1876 update_rq_clock(rq);
1877 update_curr_dl(rq);
1883 rq_clock_skip_update(rq);
1889 struct rq *rq)
1891 return (!rq->dl.dl_nr_running ||
1893 rq->dl.earliest_dl.curr));
1903 struct rq *rq;
1908 rq = cpu_rq(cpu);
1911 curr = READ_ONCE(rq->curr); /* unlocked access */
1917 * on this rq can't move (provided the waking task
1950 struct rq *rq;
1955 rq = task_rq(p);
1959 * rq->lock is not... So, lock it
1961 rq_lock(rq, &rf);
1963 update_rq_clock(rq);
1964 sub_running_bw(&p->dl, &rq->dl);
1970 * will not touch the rq's active utilization,
1976 sub_rq_bw(&p->dl, &rq->dl);
1977 rq_unlock(rq, &rf);
1980 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1986 if (rq->curr->nr_cpus_allowed == 1 ||
1987 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1995 cpudl_find(&rq->rd->cpudl, p, NULL))
1998 resched_curr(rq);
2001 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2003 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2010 rq_unpin_lock(rq, rf);
2011 pull_dl_task(rq);
2012 rq_repin_lock(rq, rf);
2015 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2023 static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
2026 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2027 resched_curr(rq);
2036 if ((p->dl.deadline == rq->curr->dl.deadline) &&
2037 !test_tsk_need_resched(rq->curr))
2038 check_preempt_equal_dl(rq, p);
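
wakeup_preempt_dl() above is plain EDF: the waking task preempts only if its absolute deadline is strictly earlier, with ties handed to check_preempt_equal_dl(). The comparison underneath (dl_time_before()) has to survive 64-bit clock wraparound, which it does by looking at the sign of the difference; a small sketch with invented _demo names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is earlier than b" on 64-bit nanosecond clocks. */
static bool dl_time_before_demo(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

/* EDF preemption rule used on wakeup: the newcomer preempts the running
 * task only if its absolute deadline is strictly earlier. */
static bool should_preempt_demo(uint64_t waking_deadline, uint64_t curr_deadline)
{
        return dl_time_before_demo(waking_deadline, curr_deadline);
}

int main(void)
{
        uint64_t now = UINT64_MAX - 50; /* near the wrap point */

        /* A deadline "100 ns after now" wraps past zero, yet still
         * compares as later than "10 ns after now". */
        printf("%d\n", should_preempt_demo(now + 10, now + 100));  /* 1 */
        printf("%d\n", should_preempt_demo(now + 100, now + 10));  /* 0 */
        return 0;
}
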
2043 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2045 hrtick_start(rq, dl_se->runtime);
2048 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
2053 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2056 struct dl_rq *dl_rq = &rq->dl;
2058 p->se.exec_start = rq_clock_task(rq);
2063 dequeue_pushable_dl_task(rq, p);
2068 if (rq->curr->sched_class != &dl_sched_class)
2069 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2071 deadline_queue_push_tasks(rq);
2084 static struct task_struct *pick_task_dl(struct rq *rq)
2087 struct dl_rq *dl_rq = &rq->dl;
2091 if (!sched_dl_runnable(rq))
2102 update_curr_dl_se(rq, dl_se, 0);
2113 static struct task_struct *pick_next_task_dl(struct rq *rq)
2117 p = pick_task_dl(rq);
2122 set_next_task_dl(rq, p, true);
2124 if (hrtick_enabled(rq))
2125 start_hrtick_dl(rq, &p->dl);
2130 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2133 struct dl_rq *dl_rq = &rq->dl;
2138 update_curr_dl(rq);
2140 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2142 enqueue_pushable_dl_task(rq, p);
2150 * and everything must be accessed through the @rq and @curr passed in
2153 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2155 update_curr_dl(rq);
2157 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2163 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2164 is_leftmost(&p->dl, &rq->dl))
2165 start_hrtick_dl(rq, &p->dl);
2181 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2183 if (!task_on_cpu(rq, p) &&
2190 * Return the earliest pushable rq's task, which is suitable to be executed
2193 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2198 if (!has_pushable_dl_tasks(rq))
2201 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2207 if (pick_dl_task(rq, p, cpu))
2244 * rq with the latest possible one.
2306 /* Locks the rq it finds */
2307 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2309 struct rq *later_rq = NULL;
2316 if ((cpu == -1) || (cpu == rq->cpu))
2323 * Target rq has tasks of equal or earlier deadline,
2332 if (double_lock_balance(rq, later_rq)) {
2333 if (unlikely(task_rq(task) != rq ||
2335 task_on_cpu(rq, task) ||
2339 double_unlock_balance(rq, later_rq);
2346 * If the rq we found has no -deadline task, or
2348 * task, the rq is a good one.
2354 double_unlock_balance(rq, later_rq);
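
find_lock_later_rq() has to drop this rq's lock to take the target's in a deadlock-safe order, so once both locks are held it re-checks that the task has not migrated, started running, been dequeued, or left the deadline class in the meantime. A simplified pthread rendition of that lock-then-revalidate pattern (all names invented; only two of the kernel's checks are shown):

#include <pthread.h>
#include <stdbool.h>

struct rq_demo {
        pthread_mutex_t lock;
        int cpu;
};

struct task_demo {
        struct rq_demo *rq;     /* runqueue the task currently belongs to */
        bool on_cpu;
};

/* Take a second runqueue lock in a global (cpu-id) order to avoid ABBA
 * deadlock; returns true if @this_rq had to be released on the way. */
static bool double_lock_demo(struct rq_demo *this_rq, struct rq_demo *later)
{
        if (pthread_mutex_trylock(&later->lock) == 0)
                return false;                   /* got it without dropping */

        pthread_mutex_unlock(&this_rq->lock);
        if (later->cpu < this_rq->cpu) {
                pthread_mutex_lock(&later->lock);
                pthread_mutex_lock(&this_rq->lock);
        } else {
                pthread_mutex_lock(&this_rq->lock);
                pthread_mutex_lock(&later->lock);
        }
        return true;
}

static struct rq_demo *find_lock_later_demo(struct task_demo *p,
                                            struct rq_demo *this_rq,
                                            struct rq_demo *later)
{
        if (double_lock_demo(this_rq, later)) {
                /* The world may have changed while this_rq was unlocked:
                 * give up if the task migrated or started running. */
                if (p->rq != this_rq || p->on_cpu) {
                        pthread_mutex_unlock(&later->lock);
                        return NULL;
                }
        }
        return later;   /* both locks held, decision still valid */
}

int main(void)
{
        struct rq_demo a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct rq_demo b = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct task_demo p = { &a, false };

        pthread_mutex_lock(&a.lock);            /* caller holds this rq's lock */
        if (find_lock_later_demo(&p, &a, &b))
                pthread_mutex_unlock(&b.lock);
        pthread_mutex_unlock(&a.lock);
        return 0;
}
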
2361 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2365 if (!has_pushable_dl_tasks(rq))
2368 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2370 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2371 WARN_ON_ONCE(task_current(rq, p));
2381 * See if the non running -deadline tasks on this rq
2385 static int push_dl_task(struct rq *rq)
2388 struct rq *later_rq;
2391 next_task = pick_next_pushable_dl_task(rq);
2397 * If next_task preempts rq->curr, and rq->curr
2401 if (dl_task(rq->curr) &&
2402 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2403 rq->curr->nr_cpus_allowed > 1) {
2404 resched_curr(rq);
2411 if (WARN_ON(next_task == rq->curr))
2414 /* We might release rq lock */
2417 /* Will lock the rq it'll find */
2418 later_rq = find_lock_later_rq(next_task, rq);
2424 * find_lock_later_rq releases rq->lock and it is
2427 task = pick_next_pushable_dl_task(rq);
2445 deactivate_task(rq, next_task, 0);
2452 double_unlock_balance(rq, later_rq);
2460 static void push_dl_tasks(struct rq *rq)
2463 while (push_dl_task(rq))
2467 static void pull_dl_task(struct rq *this_rq)
2472 struct rq *src_rq;
2505 * rq, we're done with it.
2563 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2565 if (!task_on_cpu(rq, p) &&
2566 !test_tsk_need_resched(rq->curr) &&
2568 dl_task(rq->curr) &&
2569 (rq->curr->nr_cpus_allowed < 2 ||
2570 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2571 push_dl_tasks(rq);
2579 struct rq *rq;
2583 rq = task_rq(p);
2584 src_rd = rq->rd;
2594 src_dl_b = dl_bw_of(cpu_of(rq));
2608 /* Assumes rq->lock is held */
2609 static void rq_online_dl(struct rq *rq)
2611 if (rq->dl.overloaded)
2612 dl_set_overload(rq);
2614 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2615 if (rq->dl.dl_nr_running > 0)
2616 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2619 /* Assumes rq->lock is held */
2620 static void rq_offline_dl(struct rq *rq)
2622 if (rq->dl.overloaded)
2623 dl_clear_overload(rq);
2625 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2626 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2641 struct rq *rq;
2650 rq = __task_rq_lock(p, &rf);
2652 dl_b = &rq->rd->dl_bw;
2655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2659 task_rq_unlock(rq, p, &rf);
2673 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2695 * might migrate away from this rq while continuing to run on
2697 * this rq running_bw now, or sub_rq_bw (below) will complain.
2700 sub_running_bw(&p->dl, &rq->dl);
2701 sub_rq_bw(&p->dl, &rq->dl);
2713 * Since this might be the only -deadline task on the rq,
2717 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2720 deadline_queue_pull_task(rq);
2724 * When switching to -deadline, we may overload the rq, then
2727 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2740 add_rq_bw(&p->dl, &rq->dl);
2745 if (rq->curr != p) {
2747 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2748 deadline_queue_push_tasks(rq);
2750 if (dl_task(rq->curr))
2751 wakeup_preempt_dl(rq, p, 0);
2753 resched_curr(rq);
2755 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2763 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2776 if (!rq->dl.overloaded)
2777 deadline_queue_pull_task(rq);
2779 if (task_current(rq, p)) {
2785 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2786 resched_curr(rq);
2794 if (!dl_task(rq->curr) ||
2795 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
2796 resched_curr(rq);
2803 resched_curr(rq);
2942 * This function is called while holding p's rq->lock.