Lines matching references to rq (CFS scheduler, kernel/sched/fair.c); each entry below is prefixed with its line number in that file.

311 struct rq *rq = rq_of(cfs_rq);
312 int cpu = cpu_of(rq);
315 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
343 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
349 * cfs rq without parent should be put
353 &rq->leaf_cfs_rq_list);
358 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
368 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
373 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
380 struct rq *rq = rq_of(cfs_rq);
386 * to the prev element but it will point to rq->leaf_cfs_rq_list
389 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
390 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
397 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
399 SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
403 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
404 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
485 static inline void assert_list_leaf_cfs_rq(struct rq *rq)
489 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
490 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
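The hits above come from the leaf cfs_rq list maintenance: rq->tmp_alone_branch points into rq->leaf_cfs_rq_list and marks where a partially connected, child-before-parent branch currently ends, and the assert at line 399 checks that it has been reset to the list head once everything is linked. Below is a small user-space sketch of that cursor discipline; the names (fake_rq, branch_cursor, add_leaf) are illustrative stand-ins, and the kernel's three add cases are collapsed into two just to show the idea.

```c
/*
 * Sketch (not kernel code) of the tmp_alone_branch cursor: it marks where a
 * partially added branch of the child-before-parent list ends, and is reset
 * to the list head once the branch connects to the root.
 */
#include <assert.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
	const char *name;
};

/* circular doubly-linked list helpers, modeled loosely on list_head */
static void list_init(struct node *head)
{
	head->prev = head->next = head;
	head->name = "<head>";
}

static void list_add_after(struct node *new, struct node *at)
{
	new->next = at->next;
	new->prev = at;
	at->next->prev = new;
	at->next = new;
}

struct fake_rq {
	struct node leaf_list;		/* plays the role of rq->leaf_cfs_rq_list */
	struct node *branch_cursor;	/* plays the role of rq->tmp_alone_branch */
};

static int branch_fully_connected(struct fake_rq *rq)
{
	return rq->branch_cursor == &rq->leaf_list;
}

static void add_leaf(struct fake_rq *rq, struct node *n, int connects_to_root)
{
	list_add_after(n, rq->branch_cursor);
	if (connects_to_root)
		rq->branch_cursor = &rq->leaf_list;	/* branch done: reset cursor */
	else
		rq->branch_cursor = n;			/* branch still "alone": keep growing it */
}

int main(void)
{
	struct fake_rq rq;
	struct node child = { .name = "child" }, parent = { .name = "parent" };

	list_init(&rq.leaf_list);
	rq.branch_cursor = &rq.leaf_list;

	add_leaf(&rq, &child, 0);		/* child queued, branch not yet connected */
	assert(!branch_fully_connected(&rq));

	add_leaf(&rq, &parent, 1);		/* branch reaches the root: connected */
	assert(branch_fully_connected(&rq));

	for (struct node *p = rq.leaf_list.next; p != &rq.leaf_list; p = p->next)
		printf("%s\n", p->name);	/* prints child before parent */
	return 0;
}
```

The child-before-parent ordering this keeps is what the leaf cfs_rq walk in __update_blocked_fair() relies on when propagating load upward.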
1070 switched_from_fair(rq, p);
1106 static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
1108 u64 now = rq_clock_task(rq);
1141 s64 update_curr_common(struct rq *rq)
1143 struct task_struct *curr = rq->curr;
1146 delta_exec = update_curr_se(rq, &curr->se);
1178 static void update_curr_fair(struct rq *rq)
1180 update_curr(cfs_rq_of(&rq->curr->se));
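update_curr_se() and its callers above charge the running entity for the time elapsed since its exec_start stamp, taken from the runqueue's task clock. A minimal sketch of that pattern follows, assuming a monotonic user-space clock in place of rq_clock_task(); fake_se, now_ns() and charge_runtime() are invented names.

```c
/*
 * Sketch of the delta_exec accounting pattern: delta = now - exec_start,
 * bail out if the clock did not move forward, then roll exec_start ahead
 * and accumulate runtime.  now_ns() stands in for rq_clock_task(rq).
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fake_se {
	uint64_t exec_start;		/* timestamp of the last accounting */
	uint64_t sum_exec_runtime;	/* total accumulated runtime */
};

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Returns the nanoseconds charged, or a negative value if nothing to charge. */
static int64_t charge_runtime(struct fake_se *se)
{
	uint64_t now = now_ns();
	int64_t delta = (int64_t)(now - se->exec_start);

	if (delta <= 0)
		return -1;		/* clock did not advance: nothing to do */

	se->exec_start = now;		/* the next charge starts from here */
	se->sum_exec_runtime += delta;
	return delta;
}

int main(void)
{
	struct fake_se se = { .exec_start = now_ns() };

	for (volatile long i = 0; i < 10000000; i++)
		;			/* burn a little CPU */

	printf("charged %lld ns, total %llu ns\n",
	       (long long)charge_runtime(&se),
	       (unsigned long long)se.sum_exec_runtime);
	return 0;
}
```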
1493 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1495 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
1496 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1499 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1501 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
1502 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1984 static unsigned long cpu_load(struct rq *rq);
1985 static unsigned long cpu_runnable(struct rq *rq);
2046 struct rq *rq = cpu_rq(cpu);
2048 ns->load += cpu_load(rq);
2049 ns->runnable += cpu_runnable(rq);
2051 ns->nr_running += rq->cfs.h_nr_running;
2054 if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
2055 if (READ_ONCE(rq->numa_migrate_on) ||
2078 struct rq *rq = cpu_rq(env->dst_cpu);
2081 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
2093 rq = cpu_rq(env->dst_cpu);
2094 if (!xchg(&rq->numa_migrate_on, 1))
2104 * Clear previous best_cpu/rq numa-migrate flag, since task now
2108 rq = cpu_rq(env->best_cpu);
2109 WRITE_ONCE(rq->numa_migrate_on, 0);
2167 struct rq *dst_rq = cpu_rq(env->dst_cpu);
2427 struct rq *best_rq;
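The task_numa_* hits above use xchg(&rq->numa_migrate_on, 1) to claim a destination runqueue for a NUMA migration and WRITE_ONCE(..., 0) to release it, so two CPUs cannot pick the same target at once. Below is a user-space illustration of that claim/release protocol using C11 atomics; fake_rq, claim_dst() and release_dst() are made-up names, not kernel primitives.

```c
/*
 * Claim/release illustration: atomically exchange the flag to 1 to claim a
 * destination, back off if someone else already holds it, store 0 to release.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_rq {
	atomic_int numa_migrate_on;	/* 1 while a NUMA migration targets this rq */
};

/* Try to reserve @dst as a migration target; returns 1 on success. */
static int claim_dst(struct fake_rq *dst)
{
	/* an old value of 1 means another CPU already won the race */
	return atomic_exchange(&dst->numa_migrate_on, 1) == 0;
}

static void release_dst(struct fake_rq *dst)
{
	atomic_store(&dst->numa_migrate_on, 0);
}

int main(void)
{
	struct fake_rq dst = { 0 };

	if (claim_dst(&dst))
		printf("first claim succeeds\n");
	if (!claim_dst(&dst))
		printf("second claim is rejected while the first is pending\n");
	release_dst(&dst);
	if (claim_dst(&dst))
		printf("claim succeeds again after release\n");
	return 0;
}
```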
3492 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3557 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
3561 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
3565 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
3581 struct rq *rq = rq_of(cfs_rq);
3583 account_numa_enqueue(rq, task_of(se));
3584 list_add(&se->group_node, &rq->cfs_tasks);
3829 * whether the rq-wide min_vruntime needs updated too. Since
3993 struct rq *rq = rq_of(cfs_rq);
3995 if (&rq->cfs == cfs_rq) {
4010 cpufreq_update_util(rq, flags);
4060 struct rq *rq = rq_of(cfs_rq);
4062 prev = rq->tmp_alone_branch;
4109 /* rq has been offline and doesn't contribute to the share anymore: */
4148 static void __maybe_unused clear_tg_offline_cfs_rqs(struct rq *rq)
4152 lockdep_assert_rq_held(rq);
4155 * The rq clock has already been updated in
4157 * the rq clock again in unthrottle_cfs_rq().
4159 rq_clock_start_loop_update(rq);
4163 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4169 rq_clock_stop_loop_update(rq);
4175 * including the state of rq->lock, should be made.
4215 * the group entity and group rq do not have their PELT windows aligned).
4248 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
4249 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
4251 * align the rq as a whole would be runnable 2/3 of the time. If however we
4252 * always have at least 1 runnable task, the rq as a whole is always runnable.
4260 * We can construct a rule that adds runnable to a rq by assuming minimal
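The comment fragments around line 4248 reason about how runnable a group runqueue is when each of its tasks is only runnable part of the time. The short program below simply works out the two bounds that comment mentions for two tasks each runnable 2/3 of the time: full overlap gives 2/3, no overlap gives 1.

```c
/*
 * Worked check of the bounds in the runnable_avg comment: with two tasks
 * each runnable a fraction p of the time, the rq as a whole is runnable
 * somewhere between p (the tasks fully overlap) and min(1, 2p) (they never
 * overlap).  Pure arithmetic, no kernel code.
 */
#include <stdio.h>

int main(void)
{
	double p = 2.0 / 3.0;		/* each task runnable 2/3 of the time */
	double full_overlap = p;	/* both runnable at the same moments */
	double no_overlap = (2 * p < 1.0) ? 2 * p : 1.0;	/* clamp at "always runnable" */

	printf("rq runnable fraction: between %.3f and %.3f\n",
	       full_overlap, no_overlap);	/* prints: between 0.667 and 1.000 */
	return 0;
}
```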
4468 static inline void clear_tg_offline_cfs_rqs(struct rq *rq) {}
4484 struct rq *rq;
4491 rq = rq_of(cfs_rq);
4494 is_idle = is_idle_task(rcu_dereference(rq->curr));
4513 * cfs_idle_lag (delta between rq's update and cfs_rq's update)
4516 * rq_idle_lag (delta between now and rq's update)
4524 * rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
4525 * rq_clock()@rq_idle is rq->clock_idle
4536 now = u64_u32_load(rq->clock_pelt_idle);
4554 now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
4779 * the previous rq.
4825 static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf);
5108 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
5110 int cpu = cpu_of(rq);
5123 rq->misfit_task_load = 0;
5131 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
5158 static inline int sched_balance_newidle(struct rq *rq, struct rq_flags *rf)
5172 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
5339 struct rq *rq = rq_of(cfs_rq);
5342 cfs_rq->throttled_clock = rq_clock(rq);
5344 cfs_rq->throttled_clock_self = rq_clock(rq);
5591 * directly instead of rq->clock to avoid adding additional synchronization
5592 * around rq->lock.
5717 struct rq *rq = data;
5718 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5722 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
5730 u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
5746 struct rq *rq = data;
5747 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5751 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
5756 cfs_rq->throttled_clock_self = rq_clock(rq);
5765 struct rq *rq = rq_of(cfs_rq);
5795 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5838 sub_nr_running(rq, task_delta);
5843 * throttled-list. rq->lock protects completion.
5848 cfs_rq->throttled_clock = rq_clock(rq);
5854 struct rq *rq = rq_of(cfs_rq);
5859 se = cfs_rq->tg->se[cpu_of(rq)];
5863 update_rq_clock(rq);
5867 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5874 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5928 add_nr_running(rq, task_delta);
5931 assert_list_leaf_cfs_rq(rq);
5934 if (rq->curr == rq->idle && rq->cfs.nr_running)
5935 resched_curr(rq);
5942 struct rq *rq = arg;
5945 rq_lock(rq, &rf);
5952 update_rq_clock(rq);
5953 rq_clock_start_loop_update(rq);
5956 * Since we hold rq lock we're safe from concurrent manipulation of
5964 list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
5974 rq_clock_stop_loop_update(rq);
5975 rq_unlock(rq, &rf);
5980 struct rq *rq = rq_of(cfs_rq);
5983 if (rq == this_rq()) {
5992 first = list_empty(&rq->cfsb_csd_list);
5993 list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
5995 smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
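The __unthrottle_cfs_rq_async() hits above queue a throttled cfs_rq on the target runqueue's cfsb_csd_list and fire smp_call_function_single_async() only when the list was empty (the "first" test at line 5992), so one IPI covers a whole batch of unthrottles. A rough stand-alone sketch of that kick-on-first-element pattern, with a printf standing in for the IPI and invented names throughout:

```c
/*
 * Sketch of "only kick on the first element": the cross-CPU call is sent
 * only when the queue goes from empty to non-empty; later producers just
 * append to the pending list.
 */
#include <stdio.h>

#define MAX_PENDING 16

struct fake_rq {
	int pending[MAX_PENDING];	/* queued unthrottle requests */
	int nr_pending;
};

static void queue_async_unthrottle(struct fake_rq *rq, int cfs_rq_id)
{
	int first = (rq->nr_pending == 0);	/* like list_empty(&rq->cfsb_csd_list) */

	if (rq->nr_pending < MAX_PENDING)
		rq->pending[rq->nr_pending++] = cfs_rq_id;

	if (first)
		printf("kick remote CPU (one IPI for the whole batch)\n");
	else
		printf("already kicked: just queued cfs_rq %d\n", cfs_rq_id);
}

int main(void)
{
	struct fake_rq rq = { .nr_pending = 0 };

	queue_async_unthrottle(&rq, 1);	/* sends the kick */
	queue_async_unthrottle(&rq, 2);	/* piggybacks on the pending kick */
	queue_async_unthrottle(&rq, 3);
	return 0;
}
```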
6022 struct rq *rq;
6028 rq = rq_of(cfs_rq);
6035 rq_lock_irqsave(rq, &rf);
6058 if (cpu_of(rq) != this_cpu) {
6074 rq_unlock_irqrestore(rq, &rf);
6079 struct rq *rq = rq_of(cfs_rq);
6081 rq_lock_irqsave(rq, &rf);
6088 rq_unlock_irqrestore(rq, &rf);
6219 /* we are under rq->lock, defer unthrottling using a timer */
6243 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
6273 * runtime as update_curr() throttling can not trigger until it's on-rq.
6460 struct rq *rq = cpu_rq(i);
6463 if (list_empty(&rq->cfsb_csd_list))
6467 __cfsb_csd_unthrottle(rq);
6481 static void __maybe_unused update_runtime_enabled(struct rq *rq)
6485 lockdep_assert_rq_held(rq);
6490 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6500 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
6504 lockdep_assert_rq_held(rq);
6507 * The rq clock has already been updated in the
6509 * the rq clock again in unthrottle_cfs_rq().
6511 rq_clock_start_loop_update(rq);
6515 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6526 * Offline rq is schedulable till CPU is completely disabled
6536 rq_clock_stop_loop_update(rq);
6555 static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
6557 int cpu = cpu_of(rq);
6565 if (rq->nr_running != 1)
6618 static inline void update_runtime_enabled(struct rq *rq) {}
6619 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
6629 static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
6637 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
6641 SCHED_WARN_ON(task_rq(p) != rq);
6643 if (rq->cfs.h_nr_running > 1) {
6649 if (task_current(rq, p))
6650 resched_curr(rq);
6653 hrtick_start(rq, delta);
6662 static void hrtick_update(struct rq *rq)
6664 struct task_struct *curr = rq->curr;
6666 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
6669 hrtick_start_fair(rq, curr);
6673 hrtick_start_fair(struct rq *rq, struct task_struct *p)
6677 static inline void hrtick_update(struct rq *rq)
6714 static inline void check_update_overutilized_status(struct rq *rq)
6721 if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
6722 set_rd_overutilized(rq->rd, 1);
6725 static inline void check_update_overutilized_status(struct rq *rq) { }
6729 static int sched_idle_rq(struct rq *rq)
6731 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
6732 rq->nr_running);
6748 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
6761 util_est_enqueue(&rq->cfs, p);
6769 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
6809 add_nr_running(rq, 1);
6826 check_update_overutilized_status(rq);
6829 assert_list_leaf_cfs_rq(rq);
6831 hrtick_update(rq);
6841 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
6847 bool was_sched_idle = sched_idle_rq(rq);
6849 util_est_dequeue(&rq->cfs, p);
6900 sub_nr_running(rq, 1);
6903 if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
6904 rq->next_balance = jiffies;
6907 util_est_update(&rq->cfs, p, task_sleep);
6908 hrtick_update(rq);
6931 static unsigned long cpu_load(struct rq *rq)
6933 return cfs_rq_load_avg(&rq->cfs);
6949 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
6955 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6956 return cpu_load(rq);
6958 cfs_rq = &rq->cfs;
6967 static unsigned long cpu_runnable(struct rq *rq)
6969 return cfs_rq_runnable_avg(&rq->cfs);
6972 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
6978 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
6979 return cpu_runnable(rq);
6981 cfs_rq = &rq->cfs;
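cpu_load_without() and cpu_runnable_without() above report a CPU's PELT figures as if the task being woken were not there: if the task has no contribution on that CPU the plain value is returned, otherwise its share is subtracted with a floor at zero. A plain-C sketch of that shape, with sub_positive() and load_without() as invented stand-ins for the kernel helpers:

```c
/*
 * "*_without" pattern: report a CPU's load as if task p were absent by
 * subtracting p's own contribution, clamped so the result never underflows.
 */
#include <stdio.h>

static unsigned long sub_positive(unsigned long value, unsigned long delta)
{
	return value > delta ? value - delta : 0;
}

static unsigned long load_without(unsigned long cpu_load,
				  unsigned long task_load,
				  int task_contributes_here)
{
	if (!task_contributes_here)
		return cpu_load;	/* nothing of p is folded into this CPU */
	return sub_positive(cpu_load, task_load);
}

int main(void)
{
	printf("%lu\n", load_without(700, 200, 1));	/* 500 */
	printf("%lu\n", load_without(700, 200, 0));	/* 700: p not on this CPU */
	printf("%lu\n", load_without(100, 200, 1));	/* 0: clamped, no underflow */
	return 0;
}
```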
7166 struct rq *rq = cpu_rq(i);
7168 if (!sched_core_cookie_match(rq, p))
7175 struct cpuidle_state *idle = idle_get_state(rq);
7183 latest_idle_timestamp = rq->idle_stamp;
7186 rq->idle_stamp > latest_idle_timestamp) {
7192 latest_idle_timestamp = rq->idle_stamp;
7300 void __update_idle_core(struct rq *rq)
7302 int core = cpu_of(rq);
7404 * average idle time for this rq (as found in rq->avg_idle).
8050 struct rq *rq = cpu_rq(cpu);
8070 if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
8078 rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
8079 rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
8271 * the current rq's clock. But if that clock hasn't been
8273 * leading to an inflation after wake-up on the new rq.
8324 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
8326 if (rq->nr_running)
8329 return sched_balance_newidle(rq, rf) != 0;
8349 static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int wake_flags)
8351 struct task_struct *curr = rq->curr;
8424 resched_curr(rq);
8428 static struct task_struct *pick_task_fair(struct rq *rq)
8434 cfs_rq = &rq->cfs;
8461 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
8463 struct cfs_rq *cfs_rq = &rq->cfs;
8469 if (!sched_fair_runnable(rq))
8506 cfs_rq = &rq->cfs;
8551 put_prev_task(rq, prev);
8568 list_move(&p->se.group_node, &rq->cfs_tasks);
8571 if (hrtick_enabled_fair(rq))
8572 hrtick_start_fair(rq, p);
8574 update_misfit_status(p, rq);
8575 sched_fair_update_stop_tick(rq, p);
8583 new_tasks = sched_balance_newidle(rq, rf);
8586 * Because sched_balance_newidle() releases (and re-acquires) rq->lock, it is
8597 * rq is about to be idle, check if we need to update the
8600 update_idle_rq_clock_pelt(rq);
8605 static struct task_struct *__pick_next_task_fair(struct rq *rq)
8607 return pick_next_task_fair(rq, NULL, NULL);
8613 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
8627 static void yield_task_fair(struct rq *rq)
8629 struct task_struct *curr = rq->curr;
8636 if (unlikely(rq->nr_running == 1))
8641 update_rq_clock(rq);
8651 rq_clock_skip_update(rq);
8656 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
8667 yield_task_fair(rq);
8854 struct rq *src_rq;
8858 struct rq *dst_rq;
8983 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
9259 * attach_task() -- attach the task detached by detach_task() to its new rq.
9261 static void attach_task(struct rq *rq, struct task_struct *p)
9263 lockdep_assert_rq_held(rq);
9265 WARN_ON_ONCE(task_rq(p) != rq);
9266 activate_task(rq, p, ENQUEUE_NOCLOCK);
9267 wakeup_preempt(rq, p, 0);
9272 * its new rq.
9274 static void attach_one_task(struct rq *rq, struct task_struct *p)
9278 rq_lock(rq, &rf);
9279 update_rq_clock(rq);
9280 attach_task(rq, p);
9281 rq_unlock(rq, &rf);
9286 * new rq.
9319 static inline bool others_have_blocked(struct rq *rq)
9321 if (cpu_util_rt(rq))
9324 if (cpu_util_dl(rq))
9327 if (hw_load_avg(rq))
9330 if (cpu_util_irq(rq))
9336 static inline void update_blocked_load_tick(struct rq *rq)
9338 WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
9341 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
9344 rq->has_blocked_load = 0;
9348 static inline bool others_have_blocked(struct rq *rq) { return false; }
9349 static inline void update_blocked_load_tick(struct rq *rq) {}
9350 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
9353 static bool __update_blocked_others(struct rq *rq, bool *done)
9356 u64 now = rq_clock_pelt(rq);
9364 curr_class = rq->curr->sched_class;
9366 hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
9368 decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
9369 update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
9370 update_hw_load_avg(now, rq, hw_pressure) |
9371 update_irq_load_avg(rq, 0);
9373 if (others_have_blocked(rq))
9381 static bool __update_blocked_fair(struct rq *rq, bool *done)
9385 int cpu = cpu_of(rq);
9391 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
9400 if (cfs_rq == &rq->cfs)
9431 struct rq *rq = rq_of(cfs_rq);
9432 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
9471 static bool __update_blocked_fair(struct rq *rq, bool *done)
9473 struct cfs_rq *cfs_rq = &rq->cfs;
9492 struct rq *rq = cpu_rq(cpu);
9495 rq_lock_irqsave(rq, &rf);
9496 update_blocked_load_tick(rq);
9497 update_rq_clock(rq);
9499 decayed |= __update_blocked_others(rq, &done);
9500 decayed |= __update_blocked_fair(rq, &done);
9502 update_blocked_load_status(rq, !done);
9504 cpufreq_update_util(rq, 0);
9505 rq_unlock_irqrestore(rq, &rf);
9572 struct rq *rq = cpu_rq(cpu);
9576 irq = cpu_util_irq(rq);
9585 used = cpu_util_rt(rq);
9586 used += cpu_util_dl(rq);
9668 * Check whether the capacity of the rq has been noticeably reduced by side
9673 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
9675 return ((rq->cpu_capacity * sd->imbalance_pct) <
9676 (arch_scale_cpu_capacity(cpu_of(rq)) * 100));
9679 /* Check if the rq has a misfit task */
9680 static inline bool check_misfit_status(struct rq *rq)
9682 return rq->misfit_task_load;
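check_cpu_capacity() above flags a CPU whose capacity left for CFS has dropped noticeably: capacity counts as reduced when cpu_capacity * imbalance_pct < original_capacity * 100. The numbers in the sketch below (1024 original capacity, imbalance_pct of 117, 800 or 900 left over) are illustrative values only.

```c
/*
 * Numeric illustration of the check_cpu_capacity() comparison shown above.
 */
#include <stdio.h>

static int capacity_reduced(unsigned long cpu_capacity,
			    unsigned long original_capacity,
			    unsigned int imbalance_pct)
{
	return cpu_capacity * imbalance_pct < original_capacity * 100;
}

int main(void)
{
	/* 800 * 117 = 93600 < 1024 * 100 = 102400 -> reduced */
	printf("800/1024 with pct 117: %s\n",
	       capacity_reduced(800, 1024, 117) ? "reduced" : "ok");
	/* 900 * 117 = 105300 >= 102400 -> not reduced */
	printf("900/1024 with pct 117: %s\n",
	       capacity_reduced(900, 1024, 117) ? "reduced" : "ok");
	return 0;
}
```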
9923 sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
9929 if (rq->cfs.h_nr_running != 1)
9932 return check_cpu_capacity(rq, sd);
9958 struct rq *rq = cpu_rq(i);
9959 unsigned long load = cpu_load(rq);
9963 sgs->group_runnable += cpu_runnable(rq);
9964 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
9966 nr_running = rq->nr_running;
9976 sgs->nr_numa_running += rq->nr_numa_running;
9977 sgs->nr_preferred_running += rq->nr_preferred_running;
9993 if (sgs->group_misfit_task_load < rq->misfit_task_load) {
9994 sgs->group_misfit_task_load = rq->misfit_task_load;
9997 } else if (env->idle && sched_reduced_capacity(rq, env->sd)) {
10186 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
10188 if (rq->nr_running > rq->nr_numa_running)
10190 if (rq->nr_running > rq->nr_preferred_running)
10200 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
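fbq_classify_rq() above buckets a runqueue using the counters kept by account_numa_enqueue()/dequeue(); its return statements fall outside the matching lines, so the sketch below is a reading of the classification from those counters rather than a copy of the function. The enum values reuse the kernel's names; everything else is simplified.

```c
/*
 * Stand-alone rendering of the three-way NUMA classification: does the rq
 * hold tasks with no NUMA preference, only misplaced tasks, or only tasks
 * already on their preferred node?
 */
#include <stdio.h>

enum fbq_type { regular, remote, all };

struct counts {
	unsigned int nr_running;		/* all tasks on the rq */
	unsigned int nr_numa_running;		/* tasks that have a preferred node */
	unsigned int nr_preferred_running;	/* tasks running on their preferred node */
};

static enum fbq_type classify(const struct counts *c)
{
	if (c->nr_running > c->nr_numa_running)
		return regular;		/* some tasks have no NUMA preference */
	if (c->nr_running > c->nr_preferred_running)
		return remote;		/* all have a preference, some are misplaced */
	return all;			/* everything already runs where it prefers */
}

int main(void)
{
	static const char * const name[] = { "regular", "remote", "all" };
	struct counts a = { 4, 2, 2 };	/* two tasks without any preference */
	struct counts b = { 4, 4, 2 };	/* all prefer a node, two are elsewhere */
	struct counts c = { 4, 4, 4 };	/* everything is placed */

	printf("%s %s %s\n", name[classify(&a)], name[classify(&b)], name[classify(&c)]);
	return 0;
}
```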
10234 struct rq *rq = cpu_rq(cpu);
10236 if (rq->curr != rq->idle && rq->curr != p)
10240 * rq->nr_running can't be used but an updated version without the
10245 if (rq->ttwu_pending)
10272 struct rq *rq = cpu_rq(i);
10275 sgs->group_load += cpu_load_without(rq, p);
10277 sgs->group_runnable += cpu_runnable_without(rq, p);
10279 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
10281 nr_running = rq->nr_running - local;
11023 static struct rq *sched_balance_find_src_rq(struct lb_env *env,
11026 struct rq *busiest = NULL, *rq;
11036 rq = cpu_rq(i);
11037 rt = fbq_classify_rq(rq);
11061 nr_running = rq->cfs.h_nr_running;
11094 load = cpu_load(rq);
11097 !check_cpu_capacity(rq, env->sd))
11116 busiest = rq;
11133 busiest = rq;
11140 busiest = rq;
11149 if (rq->misfit_task_load > busiest_load) {
11150 busiest_load = rq->misfit_task_load;
11151 busiest = rq;
11304 static int sched_balance_rq(int this_cpu, struct rq *this_rq,
11311 struct rq *busiest;
11626 struct rq *busiest_rq = data;
11629 struct rq *target_rq = cpu_rq(target_cpu);
11756 static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
11759 int cpu = rq->cpu;
11798 if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
11820 * Ensure the rq-wide value also decays but keep it at a
11821 * reasonable floor to avoid funnies with rq->avg_idle.
11823 rq->max_idle_balance_cost =
11834 rq->next_balance = next_balance;
11838 static inline int on_null_domain(struct rq *rq)
11840 return unlikely(!rcu_dereference_sched(rq->sd));
11895 * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
11914 static void nohz_balancer_kick(struct rq *rq)
11919 int nr_busy, i, cpu = rq->cpu;
11922 if (unlikely(rq->idle_balance))
11929 nohz_balance_exit_idle(rq);
11945 if (rq->nr_running >= 2) {
11952 sd = rcu_dereference(rq->sd);
11958 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
11988 if (check_misfit_status(rq)) {
12046 void nohz_balance_exit_idle(struct rq *rq)
12048 SCHED_WARN_ON(rq != this_rq());
12050 if (likely(!rq->nohz_tick_stopped))
12053 rq->nohz_tick_stopped = 0;
12054 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
12057 set_cpu_sd_state_busy(rq->cpu);
12082 struct rq *rq = cpu_rq(cpu);
12095 * Can be set safely without rq->lock held
12097 * rq->lock is held during the check and the clear
12099 rq->has_blocked_load = 1;
12107 if (rq->nohz_tick_stopped)
12111 if (on_null_domain(rq))
12114 rq->nohz_tick_stopped = 1;
12137 static bool update_nohz_stats(struct rq *rq)
12139 unsigned int cpu = rq->cpu;
12141 if (!rq->has_blocked_load)
12147 if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
12152 return rq->has_blocked_load;
12160 static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
12169 struct rq *rq;
12215 rq = cpu_rq(balance_cpu);
12218 has_blocked_load |= update_nohz_stats(rq);
12224 if (time_after_eq(jiffies, rq->next_balance)) {
12227 rq_lock_irqsave(rq, &rf);
12228 update_rq_clock(rq);
12229 rq_unlock_irqrestore(rq, &rf);
12232 sched_balance_domains(rq, CPU_IDLE);
12235 if (time_after(next_balance, rq->next_balance)) {
12236 next_balance = rq->next_balance;
12263 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
12309 static void nohz_newidle_balance(struct rq *this_rq)
12337 static inline void nohz_balancer_kick(struct rq *rq) { }
12339 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
12344 static inline void nohz_newidle_balance(struct rq *this_rq) { }
12356 static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
12439 * now runnable tasks on this rq.
12452 * While browsing the domains, we released the rq lock, a task could
12488 struct rq *this_rq = this_rq();
12509 void sched_balance_trigger(struct rq *rq)
12515 if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
12518 if (time_after_eq(jiffies, rq->next_balance))
12521 nohz_balancer_kick(rq);
12524 static void rq_online_fair(struct rq *rq)
12528 update_runtime_enabled(rq);
12531 static void rq_offline_fair(struct rq *rq)
12536 unthrottle_offline_cfs_rqs(rq);
12538 /* Ensure that we remove rq contribution to group share: */
12539 clear_tg_offline_cfs_rqs(rq);
12555 static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
12557 if (!sched_core_enabled(rq))
12565 * sched_slice() considers only this active rq and it gets the
12569 * go through the forced idle rq, but that would be a perf hit.
12574 if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
12576 resched_curr(rq);
12598 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
12605 se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
12611 struct rq *rq = task_rq(a);
12618 SCHED_WARN_ON(task_rq(b)->core != rq->core);
12635 se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
12636 se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
12668 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
12676 * and everything must be accessed through the @rq and @curr passed in
12679 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
12690 task_tick_numa(rq, curr);
12692 update_misfit_status(curr, rq);
12695 task_tick_core(rq, curr);
12707 struct rq *rq = this_rq();
12710 rq_lock(rq, &rf);
12711 update_rq_clock(rq);
12720 rq_unlock(rq, &rf);
12728 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
12733 if (rq->cfs.nr_running == 1)
12741 if (task_current(rq, p)) {
12743 resched_curr(rq);
12745 wakeup_preempt(rq, p, 0);
12829 static void switched_from_fair(struct rq *rq, struct task_struct *p)
12834 static void switched_to_fair(struct rq *rq, struct task_struct *p)
12846 if (task_current(rq, p))
12847 resched_curr(rq);
12849 wakeup_preempt(rq, p, 0);
12858 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
12868 list_move(&se->group_node, &rq->cfs_tasks);
12970 struct rq *rq;
12974 rq = cpu_rq(i);
12976 rq_lock_irq(rq, &rf);
12977 update_rq_clock(rq);
12980 rq_unlock_irq(rq, &rf);
12987 struct rq *rq;
13003 rq = cpu_rq(cpu);
13005 raw_spin_rq_lock_irqsave(rq, flags);
13007 raw_spin_rq_unlock_irqrestore(rq, flags);
13015 struct rq *rq = cpu_rq(cpu);
13018 cfs_rq->rq = rq;
13029 se->cfs_rq = &rq->cfs;
13063 struct rq *rq = cpu_rq(i);
13068 rq_lock_irqsave(rq, &rf);
13069 update_rq_clock(rq);
13074 rq_unlock_irqrestore(rq, &rf);
13114 struct rq *rq = cpu_rq(i);
13121 rq_lock_irqsave(rq, &rf);
13154 rq_unlock_irqrestore(rq, &rf);
13170 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
13179 if (rq->cfs.load.weight)