Lines Matching defs:cfs_rq

121 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
122 * each time a cfs_rq requests quota.
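
To make the comment above concrete, here is a minimal userspace sketch of the global-to-local quota hand-off it describes; the toy_ names and the 5 ms slice value are assumptions of this sketch, not taken from the listing.

/* Toy model of handing out CFS bandwidth slices from the tg pool. */
#include <stdio.h>

#define TOY_SLICE_NS 5000000LL				/* assumed 5 ms slice */

struct toy_tg     { long long runtime_ns; };		/* global (tg) pool */
struct toy_cfs_rq { long long runtime_remaining; };	/* local (per-cfs_rq) pool */

/* Top the local pool back up to one slice, limited by what the tg has left. */
static int toy_assign_runtime(struct toy_tg *tg, struct toy_cfs_rq *cfs_rq)
{
	long long want = TOY_SLICE_NS - cfs_rq->runtime_remaining;
	long long got;

	if (want <= 0)
		return 1;
	got = want < tg->runtime_ns ? want : tg->runtime_ns;
	tg->runtime_ns -= got;
	cfs_rq->runtime_remaining += got;
	return cfs_rq->runtime_remaining > 0;
}

int main(void)
{
	struct toy_tg tg = { .runtime_ns = 12000000LL };	/* 12 ms of period quota */
	struct toy_cfs_rq rq = { 0 };

	toy_assign_runtime(&tg, &rq);		/* take a first 5 ms slice */
	rq.runtime_remaining -= 6000000LL;	/* pretend 6 ms of runtime was charged */
	toy_assign_runtime(&tg, &rq);		/* refill back toward one slice */
	printf("tg pool left %lld ns, local pool %lld ns\n",
	       tg.runtime_ns, rq.runtime_remaining);
	return 0;
}

The real __assign_cfs_rq_runtime() (lines 5614-5637 below) has the same top-up-to-a-slice shape, with locking and unlimited-quota handling on top.
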
316 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
318 struct rq *rq = rq_of(cfs_rq);
321 if (cfs_rq->on_list)
324 cfs_rq->on_list = 1;
331 * cfs_rq. Furthermore, it also means that we will always reset
335 if (cfs_rq->tg->parent &&
336 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
343 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
344 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
354 if (!cfs_rq->tg->parent) {
359 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
375 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
380 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
384 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
386 if (cfs_rq->on_list) {
387 struct rq *rq = rq_of(cfs_rq);
390 * With cfs_rq being unthrottled/throttled during an enqueue,
396 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
397 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
399 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
400 cfs_rq->on_list = 0;
409 /* Iterate through all leaf cfs_rq's on a runqueue */
410 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
411 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
415 static inline struct cfs_rq *
418 if (se->cfs_rq == pse->cfs_rq)
419 return se->cfs_rq;
436 * same cfs_rq, i.e. who have a common parent. Walk up the hierarchy of
466 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
468 return cfs_rq->idle > 0;
483 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
488 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
496 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
497 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
514 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
527 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
561 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
563 return (s64)(se->vruntime - cfs_rq->min_vruntime);
615 * v0 := cfs_rq->min_vruntime
616 * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
617 * \Sum w_i := cfs_rq->avg_load
628 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
631 s64 key = entity_key(cfs_rq, se);
633 cfs_rq->avg_vruntime += key * weight;
634 cfs_rq->avg_load += weight;
638 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
641 s64 key = entity_key(cfs_rq, se);
643 cfs_rq->avg_vruntime -= key * weight;
644 cfs_rq->avg_load -= weight;
648 void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
653 cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
660 u64 avg_vruntime(struct cfs_rq *cfs_rq)
662 struct sched_entity *curr = cfs_rq->curr;
663 s64 avg = cfs_rq->avg_vruntime;
664 long load = cfs_rq->avg_load;
669 avg += entity_key(cfs_rq, curr) * weight;
680 return cfs_rq->min_vruntime + avg;
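
A worked, userspace-only illustration of the bookkeeping quoted at lines 615-617 and of the division-free test used by vruntime_eligible(); the weights and vruntimes are invented, and min_vruntime maintenance, overflow guards and the special handling of cfs_rq->curr are all omitted.

/* Toy model of avg_vruntime()/vruntime_eligible() with made-up entities. */
#include <stdio.h>

struct toy_se { long long vruntime; long long weight; };

int main(void)
{
	long long v0 = 1000;		/* plays the role of cfs_rq->min_vruntime */
	struct toy_se se[3] = { { 1010, 1024 }, { 1040, 2048 }, { 1100, 1024 } };
	long long avg_vruntime = 0;	/* \Sum (v_i - v0) * w_i */
	long long avg_load = 0;		/* \Sum w_i */
	int i;

	for (i = 0; i < 3; i++) {
		avg_vruntime += (se[i].vruntime - v0) * se[i].weight;
		avg_load += se[i].weight;
	}

	/* Weighted average vruntime, as returned by avg_vruntime(). */
	printf("V = %lld\n", v0 + avg_vruntime / avg_load);

	/* vruntime_eligible(): v is eligible iff (v - v0) * avg_load <= avg_vruntime,
	 * i.e. v is at or left of the weighted average, tested without a division.
	 */
	for (i = 0; i < 3; i++)
		printf("se[%d] v=%lld eligible=%d\n", i, se[i].vruntime,
		       (se[i].vruntime - v0) * avg_load <= avg_vruntime);
	return 0;
}

With these numbers V = 1047, so the first two entities are eligible and the third is not; pick_eevdf() (line 884 below) only considers entities that pass this test.
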
709 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
713 se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
733 static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
735 struct sched_entity *curr = cfs_rq->curr;
736 s64 avg = cfs_rq->avg_vruntime;
737 long load = cfs_rq->avg_load;
742 avg += entity_key(cfs_rq, curr) * weight;
746 return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
749 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
751 return vruntime_eligible(cfs_rq, se->vruntime);
754 static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
756 u64 min_vruntime = cfs_rq->min_vruntime;
762 avg_vruntime_update(cfs_rq, delta);
768 static void update_min_vruntime(struct cfs_rq *cfs_rq)
770 struct sched_entity *se = __pick_root_entity(cfs_rq);
771 struct sched_entity *curr = cfs_rq->curr;
772 u64 vruntime = cfs_rq->min_vruntime;
789 u64_u32_store(cfs_rq->min_vruntime,
790 __update_min_vruntime(cfs_rq, vruntime));
830 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
832 avg_vruntime_add(cfs_rq, se);
834 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
838 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
840 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
842 avg_vruntime_sub(cfs_rq, se);
845 struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
847 struct rb_node *root = cfs_rq->tasks_timeline.rb_root.rb_node;
855 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
857 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
884 static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
886 struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
887 struct sched_entity *se = __pick_first_entity(cfs_rq);
888 struct sched_entity *curr = cfs_rq->curr;
893 * in this cfs_rq, saving some cycles.
895 if (cfs_rq->nr_running == 1)
898 if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
909 if (se && entity_eligible(cfs_rq, se)) {
922 if (left && vruntime_eligible(cfs_rq,
935 if (entity_eligible(cfs_rq, se)) {
950 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
952 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
978 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
984 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
1004 if (cfs_rq->nr_running > 1) {
1005 resched_curr(rq_of(cfs_rq));
1006 clear_buddies(cfs_rq, se);
1033 /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
1038 * based on the cfs_rq's current util_avg:
1040 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
1049 * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
1057 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
1065 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1067 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
1068 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
1074 update_cfs_rq_load_avg(now, cfs_rq);
1075 attach_entity_load_avg(cfs_rq, se);
1081 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
1086 if (cfs_rq->avg.util_avg != 0) {
1087 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
1088 sa->util_avg /= (cfs_rq->avg.load_avg + 1);
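
The series quoted in the comment above (cfs_rq util_avg: 512, 768, 896, ...) can be reproduced with a short loop; this assumes cpu_scale = 1024 and an initially idle cfs_rq, and models only the util_avg_cap halving rule, not the rest of post_init_entity_util_avg().

/* Toy reproduction of the util_avg seeding series for successive new tasks. */
#include <stdio.h>

int main(void)
{
	long cpu_scale = 1024;	/* assumed full capacity of the CPU */
	long cfs_util = 0;	/* cfs_rq->avg.util_avg, starting idle */
	int task;

	/* Each new task is seeded with at most half of the remaining headroom,
	 * so the runqueue utilization converges geometrically toward cpu_scale.
	 */
	for (task = 1; task <= 6; task++) {
		long cap = (cpu_scale - cfs_util) / 2;	/* util_avg_cap above */
		cfs_util += cap;
		printf("after task %d: cfs_rq util_avg = %ld\n", task, cfs_util);
	}
	return 0;
}

This prints 512, 768, 896, 960, 992, 1008, matching the progression in the comment.
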
1107 static void update_tg_load_avg(struct cfs_rq *cfs_rq)
1162 static void update_curr(struct cfs_rq *cfs_rq)
1164 struct sched_entity *curr = cfs_rq->curr;
1170 delta_exec = update_curr_se(rq_of(cfs_rq), curr);
1175 update_deadline(cfs_rq, curr);
1176 update_min_vruntime(cfs_rq);
1181 account_cfs_rq_runtime(cfs_rq, delta_exec);
1190 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1203 __update_stats_wait_start(rq_of(cfs_rq), p, stats);
1207 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1229 __update_stats_wait_end(rq_of(cfs_rq), p, stats);
1233 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
1246 __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
1253 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1262 if (se != cfs_rq->curr)
1263 update_stats_wait_start_fair(cfs_rq, se);
1266 update_stats_enqueue_sleeper_fair(cfs_rq, se);
1270 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1280 if (se != cfs_rq->curr)
1281 update_stats_wait_end_fair(cfs_rq, se);
1291 rq_clock(rq_of(cfs_rq)));
1294 rq_clock(rq_of(cfs_rq)));
1302 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1307 se->exec_start = rq_clock_task(rq_of(cfs_rq));
3582 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3584 update_load_add(&cfs_rq->load, se->load.weight);
3587 struct rq *rq = rq_of(cfs_rq);
3593 cfs_rq->nr_running++;
3595 cfs_rq->idle_nr_running++;
3599 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
3601 update_load_sub(&cfs_rq->load, se->load.weight);
3604 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
3608 cfs_rq->nr_running--;
3610 cfs_rq->idle_nr_running--;
3663 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3665 cfs_rq->avg.load_avg += se->avg.load_avg;
3666 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
3670 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3672 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3673 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
3675 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
3676 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
3680 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3682 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
3730 * So the cfs_rq contains only one entity, hence vruntime of
3731 * the entity @v should always be equal to the cfs_rq's weighted
3754 * If the entity is the only one in the cfs_rq, then reweight
3791 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
3794 bool curr = cfs_rq->curr == se;
3799 update_curr(cfs_rq);
3800 avruntime = avg_vruntime(cfs_rq);
3802 __dequeue_entity(cfs_rq, se);
3803 update_load_sub(&cfs_rq->load, se->load.weight);
3805 dequeue_load_avg(cfs_rq, se);
3827 enqueue_load_avg(cfs_rq, se);
3829 update_load_add(&cfs_rq->load, se->load.weight);
3831 __enqueue_entity(cfs_rq, se);
3840 update_min_vruntime(cfs_rq);
3847 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3851 reweight_entity(cfs_rq, se, weight);
3855 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3932 static long calc_group_shares(struct cfs_rq *cfs_rq)
3935 struct task_group *tg = cfs_rq->tg;
3939 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3944 tg_weight -= cfs_rq->tg_load_avg_contrib;
3973 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3997 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
3999 struct rq *rq = rq_of(cfs_rq);
4001 if (&rq->cfs == cfs_rq) {
4044 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
4046 return u64_u32_load_copy(cfs_rq->avg.last_update_time,
4047 cfs_rq->last_update_time_copy);
4051 * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
4052 * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
4053 * bottom-up, we only have to test whether the cfs_rq before us on the list
4055 * If cfs_rq is not on the list, test whether a child needs it to be added to
4058 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
4060 struct cfs_rq *prev_cfs_rq;
4063 if (cfs_rq->on_list) {
4064 prev = cfs_rq->leaf_cfs_rq_list.prev;
4066 struct rq *rq = rq_of(cfs_rq);
4071 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
4073 return (prev_cfs_rq->tg->parent == cfs_rq->tg);
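
A minimal sketch of the ordering invariant described at lines 4051-4058: because a child is always linked immediately before its parent, "is the previous list entry a child of mine?" is enough to answer child_cfs_rq_on_list(). Only the on_list branch is modelled, the toy_ structures stand in for the real list_head plumbing, and rq->tmp_alone_branch is ignored.

/* Toy model of the "child immediately precedes its parent" leaf-list test. */
#include <stdio.h>

struct toy_tg     { struct toy_tg *parent; };
struct toy_cfs_rq { struct toy_tg *tg; struct toy_cfs_rq *prev_on_list; int on_list; };

/* If we are on the leaf list, a child is still queued iff the entry just
 * before us belongs to a group whose parent is our own group.
 */
static int toy_child_cfs_rq_on_list(struct toy_cfs_rq *cfs_rq)
{
	struct toy_cfs_rq *prev = cfs_rq->on_list ? cfs_rq->prev_on_list : NULL;

	return prev && prev->tg->parent == cfs_rq->tg;
}

int main(void)
{
	struct toy_tg root = { NULL }, child = { &root };
	struct toy_cfs_rq child_rq = { &child, NULL, 1 };
	struct toy_cfs_rq root_rq  = { &root, &child_rq, 1 };	/* child linked right before parent */

	printf("root_rq has a queued child: %d\n", toy_child_cfs_rq_on_list(&root_rq));
	printf("child_rq has a queued child: %d\n", toy_child_cfs_rq_on_list(&child_rq));
	return 0;
}
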
4076 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
4078 if (cfs_rq->load.weight)
4081 if (!load_avg_is_decayed(&cfs_rq->avg))
4084 if (child_cfs_rq_on_list(cfs_rq))
4092 * @cfs_rq: the cfs_rq whose avg changed
4094 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
4098 * In order to avoid having to look at the other cfs_rq's, we use a
4104 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
4112 if (cfs_rq->tg == &root_task_group)
4116 if (!cpu_active(cpu_of(rq_of(cfs_rq))))
4123 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
4124 if (now - cfs_rq->last_update_tg_load_avg < NSEC_PER_MSEC)
4127 delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
4128 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
4129 atomic_long_add(delta, &cfs_rq->tg->load_avg);
4130 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
4131 cfs_rq->last_update_tg_load_avg = now;
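
The fragments above show update_tg_load_avg() folding a per-cfs_rq change into the shared tg->load_avg only when it has moved by more than 1/64th of the cached contribution. A userspace sketch of just that delta-plus-filter pattern follows; the atomics, the 1 ms rate limit and the toy_ names are omissions or assumptions of the sketch.

/* Toy model of the differential tg->load_avg update. */
#include <stdio.h>
#include <stdlib.h>

struct toy_cfs_rq { long load_avg; long tg_load_avg_contrib; };

static void toy_update_tg_load_avg(long *tg_load_avg, struct toy_cfs_rq *cfs_rq)
{
	long delta = cfs_rq->load_avg - cfs_rq->tg_load_avg_contrib;

	/* Propagate only changes larger than contrib/64, as in the code above. */
	if (labs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
		*tg_load_avg += delta;
		cfs_rq->tg_load_avg_contrib = cfs_rq->load_avg;
	}
}

int main(void)
{
	long tg_load_avg = 0;
	struct toy_cfs_rq rq = { .load_avg = 1024, .tg_load_avg_contrib = 0 };

	toy_update_tg_load_avg(&tg_load_avg, &rq);	/* big change: propagated */
	rq.load_avg = 1030;
	toy_update_tg_load_avg(&tg_load_avg, &rq);	/* 6/1024 change: filtered out */
	printf("tg->load_avg = %ld, contrib = %ld\n", tg_load_avg, rq.tg_load_avg_contrib);
	return 0;
}
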
4135 static inline void clear_tg_load_avg(struct cfs_rq *cfs_rq)
4143 if (cfs_rq->tg == &root_task_group)
4146 now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));
4147 delta = 0 - cfs_rq->tg_load_avg_contrib;
4148 atomic_long_add(delta, &cfs_rq->tg->load_avg);
4149 cfs_rq->tg_load_avg_contrib = 0;
4150 cfs_rq->last_update_tg_load_avg = now;
4169 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4171 clear_tg_load_avg(cfs_rq);
4184 struct cfs_rq *prev, struct cfs_rq *next)
4194 * date and ready to go to the new CPU/cfs_rq. But we have difficulty in
4277 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4287 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4290 divider = get_pelt_divider(&cfs_rq->avg);
4299 /* Update parent cfs_rq utilization */
4300 add_positive(&cfs_rq->avg.util_avg, delta_avg);
4301 add_positive(&cfs_rq->avg.util_sum, delta_sum);
4304 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4305 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4309 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4319 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4322 divider = get_pelt_divider(&cfs_rq->avg);
4330 /* Update parent cfs_rq runnable */
4331 add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
4332 add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
4334 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4335 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4339 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
4353 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4356 divider = get_pelt_divider(&cfs_rq->avg);
4399 add_positive(&cfs_rq->avg.load_avg, delta_avg);
4400 add_positive(&cfs_rq->avg.load_sum, delta_sum);
4402 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
4403 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
4406 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
4408 cfs_rq->propagate = 1;
4409 cfs_rq->prop_runnable_sum += runnable_sum;
4412 /* Update task and its cfs_rq load average */
4415 struct cfs_rq *cfs_rq, *gcfs_rq;
4426 cfs_rq = cfs_rq_of(se);
4428 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
4430 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
4431 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
4432 update_tg_cfs_load(cfs_rq, se, gcfs_rq);
4434 trace_pelt_cfs_tp(cfs_rq);
4446 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
4472 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
4481 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
4489 struct cfs_rq *cfs_rq;
4496 cfs_rq = cfs_rq_of(se);
4497 rq = rq_of(cfs_rq);
4514 * last_update_time (the cfs_rq's last_update_time)
4519 * cfs_idle_lag (delta between rq's update and cfs_rq's update)
4533 * is cfs_rq->throttled_pelt_idle
4537 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
4550 lut = cfs_rq_last_update_time(cfs_rq);
4555 * cfs_rq->avg.last_update_time is more recent than our
4569 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
4571 * @cfs_rq: cfs_rq to update
4573 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
4576 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
4580 * Since both these conditions indicate a changed cfs_rq->avg.load we should
4584 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4587 struct sched_avg *sa = &cfs_rq->avg;
4590 if (cfs_rq->removed.nr) {
4592 u32 divider = get_pelt_divider(&cfs_rq->avg);
4594 raw_spin_lock(&cfs_rq->removed.lock);
4595 swap(cfs_rq->removed.util_avg, removed_util);
4596 swap(cfs_rq->removed.load_avg, removed_load);
4597 swap(cfs_rq->removed.runnable_avg, removed_runnable);
4598 cfs_rq->removed.nr = 0;
4599 raw_spin_unlock(&cfs_rq->removed.lock);
4634 add_tg_cfs_propagate(cfs_rq,
4640 decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
4642 cfs_rq->last_update_time_copy,
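
A single-threaded sketch of the deferred "removed" aggregation visible above: remove_entity_load_avg() (lines 4802-4818 below) only records a departing entity's contribution, and the next update_cfs_rq_load_avg() on the owning CPU folds it in. The real code does the hand-off with swap() under removed.lock, uses sub_positive() and also adjusts the *_sum counterparts; the toy_ names here are invented.

/* Toy model of deferring load removal to the cfs_rq's next update. */
#include <stdio.h>

struct toy_removed { int nr; long load_avg; long util_avg; };
struct toy_cfs_rq  { long load_avg; long util_avg; struct toy_removed removed; };

/* Departing entity: just record what should eventually be subtracted. */
static void toy_remove_entity(struct toy_cfs_rq *cfs_rq, long load, long util)
{
	cfs_rq->removed.nr++;
	cfs_rq->removed.load_avg += load;
	cfs_rq->removed.util_avg += util;
}

/* Owning CPU, at its next update: fold the accumulated removals in. */
static void toy_fold_removed(struct toy_cfs_rq *cfs_rq)
{
	if (!cfs_rq->removed.nr)
		return;
	cfs_rq->load_avg -= cfs_rq->removed.load_avg;
	cfs_rq->util_avg -= cfs_rq->removed.util_avg;
	cfs_rq->removed = (struct toy_removed){ 0 };
}

int main(void)
{
	struct toy_cfs_rq rq = { .load_avg = 2048, .util_avg = 600 };

	toy_remove_entity(&rq, 1024, 200);	/* e.g. a task migrated away */
	toy_fold_removed(&rq);
	printf("load_avg = %ld, util_avg = %ld\n", rq.load_avg, rq.util_avg);
	return 0;
}
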
4648 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
4649 * @cfs_rq: cfs_rq to attach to
4653 * cfs_rq->avg.last_update_time being current.
4655 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4658 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
4661 u32 divider = get_pelt_divider(&cfs_rq->avg);
4664 * When we attach the @se to the @cfs_rq, we must align the decay
4670 se->avg.last_update_time = cfs_rq->avg.last_update_time;
4671 se->avg.period_contrib = cfs_rq->avg.period_contrib;
4689 enqueue_load_avg(cfs_rq, se);
4690 cfs_rq->avg.util_avg += se->avg.util_avg;
4691 cfs_rq->avg.util_sum += se->avg.util_sum;
4692 cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
4693 cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
4695 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
4697 cfs_rq_util_change(cfs_rq, 0);
4699 trace_pelt_cfs_tp(cfs_rq);
4703 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
4704 * @cfs_rq: cfs_rq to detach from
4708 * cfs_rq->avg.last_update_time being current.
4710 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
4712 dequeue_load_avg(cfs_rq, se);
4713 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4714 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4716 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
4717 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
4719 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
4720 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
4722 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
4723 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
4725 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
4727 cfs_rq_util_change(cfs_rq, 0);
4729 trace_pelt_cfs_tp(cfs_rq);
4740 /* Update task and its cfs_rq load average */
4741 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
4743 u64 now = cfs_rq_clock_pelt(cfs_rq);
4751 __update_load_avg_se(now, cfs_rq, se);
4753 decayed = update_cfs_rq_load_avg(now, cfs_rq);
4765 attach_entity_load_avg(cfs_rq, se);
4766 update_tg_load_avg(cfs_rq);
4773 detach_entity_load_avg(cfs_rq, se);
4774 update_tg_load_avg(cfs_rq);
4776 cfs_rq_util_change(cfs_rq, 0);
4779 update_tg_load_avg(cfs_rq);
4789 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4792 last_update_time = cfs_rq_last_update_time(cfs_rq);
4797 * Task first catches up with cfs_rq, and then subtracts
4798 * itself from the cfs_rq (task must be off the queue now).
4802 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4807 * enqueue_task_fair() which will have added things to the cfs_rq,
4813 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
4814 ++cfs_rq->removed.nr;
4815 cfs_rq->removed.util_avg += se->avg.util_avg;
4816 cfs_rq->removed.load_avg += se->avg.load_avg;
4817 cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
4818 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
4821 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
4823 return cfs_rq->avg.runnable_avg;
4826 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
4828 return cfs_rq->avg.load_avg;
4853 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
4861 /* Update root cfs_rq's estimated utilization */
4862 enqueued = cfs_rq->avg.util_est;
4864 WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
4866 trace_sched_util_est_cfs_tp(cfs_rq);
4869 static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
4877 /* Update root cfs_rq's estimated utilization */
4878 enqueued = cfs_rq->avg.util_est;
4880 WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
4882 trace_sched_util_est_cfs_tp(cfs_rq);
4887 static inline void util_est_update(struct cfs_rq *cfs_rq,
4937 if (dequeued > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))
5129 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
5131 return !cfs_rq->nr_running;
5139 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
5141 cfs_rq_util_change(cfs_rq, 0);
5147 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5149 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
5157 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
5160 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
5163 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
5170 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5172 u64 vslice, vruntime = avg_vruntime(cfs_rq);
5186 if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
5187 struct sched_entity *curr = cfs_rq->curr;
5244 load = cfs_rq->avg_load;
5270 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
5271 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
5276 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5278 bool curr = cfs_rq->curr == se;
5285 place_entity(cfs_rq, se, flags);
5287 update_curr(cfs_rq);
5291 * - Update loads to have both entity and cfs_rq synced with now.
5293 * h_nr_running of its group cfs_rq.
5295 * its group cfs_rq
5296 * - Add its new weight to cfs_rq->load.weight
5298 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
5312 place_entity(cfs_rq, se, flags);
5314 account_entity_enqueue(cfs_rq, se);
5321 update_stats_enqueue_fair(cfs_rq, se, flags);
5323 __enqueue_entity(cfs_rq, se);
5326 if (cfs_rq->nr_running == 1) {
5327 check_enqueue_throttle(cfs_rq);
5328 if (!throttled_hierarchy(cfs_rq)) {
5329 list_add_leaf_cfs_rq(cfs_rq);
5332 struct rq *rq = rq_of(cfs_rq);
5334 if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
5335 cfs_rq->throttled_clock = rq_clock(rq);
5336 if (!cfs_rq->throttled_clock_self)
5337 cfs_rq->throttled_clock_self = rq_clock(rq);
5346 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5347 if (cfs_rq->next != se)
5350 cfs_rq->next = NULL;
5354 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
5356 if (cfs_rq->next == se)
5360 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5363 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
5373 update_curr(cfs_rq);
5377 * - Update loads to have both entity and cfs_rq synced with now.
5379 * h_nr_running of its group cfs_rq.
5380 * - Subtract its previous weight from cfs_rq->load.weight.
5382 * of its group cfs_rq.
5384 update_load_avg(cfs_rq, se, action);
5387 update_stats_dequeue_fair(cfs_rq, se, flags);
5389 clear_buddies(cfs_rq, se);
5391 update_entity_lag(cfs_rq, se);
5392 if (se != cfs_rq->curr)
5393 __dequeue_entity(cfs_rq, se);
5395 account_entity_dequeue(cfs_rq, se);
5398 return_cfs_rq_runtime(cfs_rq);
5409 update_min_vruntime(cfs_rq);
5411 if (cfs_rq->nr_running == 0)
5412 update_idle_cfs_rq_clock_pelt(cfs_rq);
5416 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
5418 clear_buddies(cfs_rq, se);
5427 update_stats_wait_end_fair(cfs_rq, se);
5428 __dequeue_entity(cfs_rq, se);
5429 update_load_avg(cfs_rq, se, UPDATE_TG);
5437 update_stats_curr_start(cfs_rq, se);
5438 cfs_rq->curr = se;
5446 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
5466 pick_next_entity(struct cfs_rq *cfs_rq)
5472 cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
5473 return cfs_rq->next;
5475 return pick_eevdf(cfs_rq);
5478 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5480 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
5487 update_curr(cfs_rq);
5490 check_cfs_rq_runtime(cfs_rq);
5493 update_stats_wait_start_fair(cfs_rq, prev);
5495 __enqueue_entity(cfs_rq, prev);
5497 update_load_avg(cfs_rq, prev, 0);
5499 cfs_rq->curr = NULL;
5503 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
5508 update_curr(cfs_rq);
5513 update_load_avg(cfs_rq, curr, UPDATE_TG);
5522 resched_curr(rq_of(cfs_rq));
5529 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
5614 struct cfs_rq *cfs_rq, u64 target_runtime)
5621 min_amount = target_runtime - cfs_rq->runtime_remaining;
5635 cfs_rq->runtime_remaining += amount;
5637 return cfs_rq->runtime_remaining > 0;
5641 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
5643 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5647 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
5653 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5656 cfs_rq->runtime_remaining -= delta_exec;
5658 if (likely(cfs_rq->runtime_remaining > 0))
5661 if (cfs_rq->throttled)
5667 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
5668 resched_curr(rq_of(cfs_rq));
5672 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
5674 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
5677 __account_cfs_rq_runtime(cfs_rq, delta_exec);
5680 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5682 return cfs_bandwidth_used() && cfs_rq->throttled;
5685 /* check whether cfs_rq, or any parent, is throttled */
5686 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5688 return cfs_bandwidth_used() && cfs_rq->throttle_count;
5699 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
5701 src_cfs_rq = tg->cfs_rq[src_cpu];
5702 dest_cfs_rq = tg->cfs_rq[dest_cpu];
5711 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5713 cfs_rq->throttle_count--;
5714 if (!cfs_rq->throttle_count) {
5715 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
5716 cfs_rq->throttled_clock_pelt;
5718 /* Add a cfs_rq that has load or one or more already-running entities to the list */
5719 if (!cfs_rq_is_decayed(cfs_rq))
5720 list_add_leaf_cfs_rq(cfs_rq);
5722 if (cfs_rq->throttled_clock_self) {
5723 u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
5725 cfs_rq->throttled_clock_self = 0;
5730 cfs_rq->throttled_clock_self_time += delta;
5740 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
5743 if (!cfs_rq->throttle_count) {
5744 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
5745 list_del_leaf_cfs_rq(cfs_rq);
5747 SCHED_WARN_ON(cfs_rq->throttled_clock_self);
5748 if (cfs_rq->nr_running)
5749 cfs_rq->throttled_clock_self = rq_clock(rq);
5751 cfs_rq->throttle_count++;
5756 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
5758 struct rq *rq = rq_of(cfs_rq);
5759 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5765 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
5776 list_add_tail_rcu(&cfs_rq->throttled_list,
5784 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
5788 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
5791 task_delta = cfs_rq->h_nr_running;
5792 idle_task_delta = cfs_rq->idle_h_nr_running;
5794 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5802 idle_task_delta = cfs_rq->h_nr_running;
5815 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5824 idle_task_delta = cfs_rq->h_nr_running;
5838 cfs_rq->throttled = 1;
5839 SCHED_WARN_ON(cfs_rq->throttled_clock);
5840 if (cfs_rq->nr_running)
5841 cfs_rq->throttled_clock = rq_clock(rq);
5845 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
5847 struct rq *rq = rq_of(cfs_rq);
5848 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5852 se = cfs_rq->tg->se[cpu_of(rq)];
5854 cfs_rq->throttled = 0;
5859 if (cfs_rq->throttled_clock) {
5860 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5861 cfs_rq->throttled_clock = 0;
5863 list_del_rcu(&cfs_rq->throttled_list);
5867 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
5869 if (!cfs_rq->load.weight) {
5870 if (!cfs_rq->on_list)
5883 task_delta = cfs_rq->h_nr_running;
5884 idle_task_delta = cfs_rq->idle_h_nr_running;
5886 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5893 idle_task_delta = cfs_rq->h_nr_running;
5898 /* end evaluation on encountering a throttled cfs_rq */
5904 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
5910 idle_task_delta = cfs_rq->h_nr_running;
5915 /* end evaluation on encountering a throttled cfs_rq */
5934 struct cfs_rq *cursor, *tmp;
5971 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5973 struct rq *rq = rq_of(cfs_rq);
5977 unthrottle_cfs_rq(cfs_rq);
5982 if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list)))
5986 list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
5991 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5993 unthrottle_cfs_rq(cfs_rq);
5997 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
5999 lockdep_assert_rq_held(rq_of(cfs_rq));
6001 if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) ||
6002 cfs_rq->runtime_remaining <= 0))
6005 __unthrottle_cfs_rq_async(cfs_rq);
6013 struct cfs_rq *cfs_rq, *tmp;
6019 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
6021 rq = rq_of(cfs_rq);
6029 if (!cfs_rq_throttled(cfs_rq))
6033 if (!list_empty(&cfs_rq->throttled_csd_list))
6037 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
6040 runtime = -cfs_rq->runtime_remaining + 1;
6047 cfs_rq->runtime_remaining += runtime;
6050 if (cfs_rq->runtime_remaining > 0) {
6052 unthrottle_cfs_rq_async(cfs_rq);
6056 * a single cfs_rq locally.
6059 list_add_tail(&cfs_rq->throttled_csd_list,
6070 list_for_each_entry_safe(cfs_rq, tmp, &local_unthrottle,
6072 struct rq *rq = rq_of(cfs_rq);
6076 list_del_init(&cfs_rq->throttled_csd_list);
6078 if (cfs_rq_throttled(cfs_rq))
6079 unthrottle_cfs_rq(cfs_rq);
6150 /* a cfs_rq won't donate quota below this amount */
6200 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6202 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6203 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
6220 cfs_rq->runtime_remaining -= slack_runtime;
6223 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6228 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
6231 __return_cfs_rq_runtime(cfs_rq);
6268 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
6274 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
6278 if (cfs_rq_throttled(cfs_rq))
6282 account_cfs_rq_runtime(cfs_rq, 0);
6283 if (cfs_rq->runtime_remaining <= 0)
6284 throttle_cfs_rq(cfs_rq);
6289 struct cfs_rq *pcfs_rq, *cfs_rq;
6297 cfs_rq = tg->cfs_rq[cpu];
6298 pcfs_rq = tg->parent->cfs_rq[cpu];
6300 cfs_rq->throttle_count = pcfs_rq->throttle_count;
6301 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
6304 /* conditionally throttle active cfs_rq's from put_prev_entity() */
6305 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6310 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
6317 if (cfs_rq_throttled(cfs_rq))
6320 return throttle_cfs_rq(cfs_rq);
6411 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
6413 cfs_rq->runtime_enabled = 0;
6414 INIT_LIST_HEAD(&cfs_rq->throttled_list);
6415 INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
6442 * It is possible that we still have some cfs_rq's pending on a CSD
6445 * exist throttled cfs_rq(s), and the period_timer must have queued the
6448 * guaranteed at this point that no additional cfs_rq of this group can
6483 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6486 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
6508 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
6510 if (!cfs_rq->runtime_enabled)
6517 cfs_rq->runtime_remaining = 1;
6522 cfs_rq->runtime_enabled = 0;
6524 if (cfs_rq_throttled(cfs_rq))
6525 unthrottle_cfs_rq(cfs_rq);
6534 struct cfs_rq *cfs_rq = task_cfs_rq(p);
6539 if (cfs_rq->runtime_enabled ||
6540 tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF)
6579 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
6580 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
6581 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
6583 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
6585 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
6590 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
6603 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
6718 struct cfs_rq *cfs_rq;
6725 * the cfs_rq utilization to select a frequency.
6726 * Let's add the task's estimated utilization to the cfs_rq's
6742 cfs_rq = cfs_rq_of(se);
6743 enqueue_entity(cfs_rq, se, flags);
6745 cfs_rq->h_nr_running++;
6746 cfs_rq->idle_h_nr_running += idle_h_nr_running;
6748 if (cfs_rq_is_idle(cfs_rq))
6751 /* end evaluation on encountering a throttled cfs_rq */
6752 if (cfs_rq_throttled(cfs_rq))
6759 cfs_rq = cfs_rq_of(se);
6761 update_load_avg(cfs_rq, se, UPDATE_TG);
6765 cfs_rq->h_nr_running++;
6766 cfs_rq->idle_h_nr_running += idle_h_nr_running;
6768 if (cfs_rq_is_idle(cfs_rq))
6771 /* end evaluation on encountering a throttled cfs_rq */
6772 if (cfs_rq_throttled(cfs_rq))
6811 struct cfs_rq *cfs_rq;
6820 cfs_rq = cfs_rq_of(se);
6821 dequeue_entity(cfs_rq, se, flags);
6823 cfs_rq->h_nr_running--;
6824 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6826 if (cfs_rq_is_idle(cfs_rq))
6829 /* end evaluation on encountering a throttled cfs_rq */
6830 if (cfs_rq_throttled(cfs_rq))
6834 if (cfs_rq->load.weight) {
6838 * Bias pick_next to pick a task from this cfs_rq, as
6841 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
6849 cfs_rq = cfs_rq_of(se);
6851 update_load_avg(cfs_rq, se, UPDATE_TG);
6855 cfs_rq->h_nr_running--;
6856 cfs_rq->idle_h_nr_running -= idle_h_nr_running;
6858 if (cfs_rq_is_idle(cfs_rq))
6861 /* end evaluation on encountering a throttled cfs_rq */
6862 if (cfs_rq_throttled(cfs_rq))
6919 struct cfs_rq *cfs_rq;
6926 cfs_rq = &rq->cfs;
6927 load = READ_ONCE(cfs_rq->avg.load_avg);
6942 struct cfs_rq *cfs_rq;
6949 cfs_rq = &rq->cfs;
6950 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
7680 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
7681 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
7685 runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
7703 util_est = READ_ONCE(cfs_rq->avg.util_est);
8244 * Estimate the missing time from the cfs_rq last_update_time
8290 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8349 cfs_rq = cfs_rq_of(se);
8350 update_curr(cfs_rq);
8353 * XXX pick_eevdf(cfs_rq) != se ?
8355 if (pick_eevdf(cfs_rq) == pse)
8368 struct cfs_rq *cfs_rq;
8371 cfs_rq = &rq->cfs;
8372 if (!cfs_rq->nr_running)
8376 struct sched_entity *curr = cfs_rq->curr;
8381 update_curr(cfs_rq);
8385 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
8389 se = pick_next_entity(cfs_rq);
8390 cfs_rq = group_cfs_rq(se);
8391 } while (cfs_rq);
8400 struct cfs_rq *cfs_rq = &rq->cfs;
8422 struct sched_entity *curr = cfs_rq->curr;
8426 * have to consider cfs_rq->curr. If it is still a runnable
8432 update_curr(cfs_rq);
8442 if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
8443 cfs_rq = &rq->cfs;
8445 if (!cfs_rq->nr_running)
8452 se = pick_next_entity(cfs_rq);
8453 cfs_rq = group_cfs_rq(se);
8454 } while (cfs_rq);
8466 while (!(cfs_rq = is_same_group(se, pse))) {
8480 put_prev_entity(cfs_rq, pse);
8481 set_next_entity(cfs_rq, se);
8491 se = pick_next_entity(cfs_rq);
8492 set_next_entity(cfs_rq, se);
8493 cfs_rq = group_cfs_rq(se);
8494 } while (cfs_rq);
8553 struct cfs_rq *cfs_rq;
8556 cfs_rq = cfs_rq_of(se);
8557 put_prev_entity(cfs_rq, se);
8567 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
8576 clear_buddies(cfs_rq, se);
8582 update_curr(cfs_rq);
9245 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
9247 if (cfs_rq->avg.load_avg)
9250 if (cfs_rq->avg.util_avg)
9284 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
9320 struct cfs_rq *cfs_rq, *pos;
9328 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
9331 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
9332 update_tg_load_avg(cfs_rq);
9334 if (cfs_rq->nr_running == 0)
9335 update_idle_cfs_rq_clock_pelt(cfs_rq);
9337 if (cfs_rq == &rq->cfs)
9342 se = cfs_rq->tg->se[cpu];
9350 if (cfs_rq_is_decayed(cfs_rq))
9351 list_del_leaf_cfs_rq(cfs_rq);
9354 if (cfs_rq_has_blocked(cfs_rq))
9362 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
9366 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9368 struct rq *rq = rq_of(cfs_rq);
9369 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
9373 if (cfs_rq->last_h_load_update == now)
9376 WRITE_ONCE(cfs_rq->h_load_next, NULL);
9378 cfs_rq = cfs_rq_of(se);
9379 WRITE_ONCE(cfs_rq->h_load_next, se);
9380 if (cfs_rq->last_h_load_update == now)
9385 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
9386 cfs_rq->last_h_load_update = now;
9389 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
9390 load = cfs_rq->h_load;
9392 cfs_rq_load_avg(cfs_rq) + 1);
9393 cfs_rq = group_cfs_rq(se);
9394 cfs_rq->h_load = load;
9395 cfs_rq->last_h_load_update = now;
9401 struct cfs_rq *cfs_rq = task_cfs_rq(p);
9403 update_cfs_rq_h_load(cfs_rq);
9404 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
9405 cfs_rq_load_avg(cfs_rq) + 1);
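
A made-up numeric walk of the hierarchical load factor computed above: each level scales the parent's h_load by se->avg.load_avg / (cfs_rq load_avg + 1), and task_h_load() applies the same ratio once more for the task. All numbers below are invented for illustration.

/* Toy walk of update_cfs_rq_h_load()/task_h_load() scaling (made-up numbers). */
#include <stdio.h>

int main(void)
{
	/* Root cfs_rq: its h_load is simply its own load_avg. */
	unsigned long h_load = 2048;

	/* One group level: the group se has load_avg 1024 on a root cfs_rq
	 * whose load_avg is 2048, so the group cfs_rq gets a scaled h_load.
	 */
	unsigned long se_load = 1024, root_load = 2048;
	h_load = h_load * se_load / (root_load + 1);
	printf("group cfs_rq h_load = %lu\n", h_load);

	/* task_h_load(): a task with load_avg 512 on that group cfs_rq
	 * (group cfs_rq load_avg 1024) gets the same ratio applied again.
	 */
	unsigned long task_load = 512, grp_load = 1024;
	printf("task_h_load = %lu\n", task_load * h_load / (grp_load + 1));
	return 0;
}

Intuitively, a task carrying half of a group that itself carries half of the root ends up with roughly a quarter of the root's load (about 511 out of 2048 here).
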
9410 struct cfs_rq *cfs_rq = &rq->cfs;
9413 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
9414 if (cfs_rq_has_blocked(cfs_rq))
12511 * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
12517 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12520 if (cfs_rq->forceidle_seq == fi_seq)
12522 cfs_rq->forceidle_seq = fi_seq;
12525 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
12545 struct cfs_rq *cfs_rqa;
12546 struct cfs_rq *cfs_rqb;
12556 while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
12569 cfs_rqa = sea->cfs_rq;
12570 cfs_rqb = seb->cfs_rq;
12577 * Find delta after normalizing se's vruntime with its cfs_rq's
12589 struct cfs_rq *cfs_rq;
12592 cfs_rq = task_group(p)->cfs_rq[cpu];
12594 cfs_rq = &cpu_rq(cpu)->cfs;
12596 return throttled_hierarchy(cfs_rq);
12612 struct cfs_rq *cfs_rq;
12616 cfs_rq = cfs_rq_of(se);
12617 entity_tick(cfs_rq, se, queued);
12637 struct cfs_rq *cfs_rq;
12644 cfs_rq = task_cfs_rq(current);
12645 curr = cfs_rq->curr;
12647 update_curr(cfs_rq);
12648 place_entity(cfs_rq, se, ENQUEUE_INITIAL);
12684 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12686 if (cfs_rq_throttled(cfs_rq))
12689 if (!throttled_hierarchy(cfs_rq))
12690 list_add_leaf_cfs_rq(cfs_rq);
12696 cfs_rq = cfs_rq_of(se);
12698 update_load_avg(cfs_rq, se, UPDATE_TG);
12700 if (cfs_rq_throttled(cfs_rq))
12703 if (!throttled_hierarchy(cfs_rq))
12704 list_add_leaf_cfs_rq(cfs_rq);
12713 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12726 /* Catch up with the cfs_rq and remove our load when we leave */
12727 update_load_avg(cfs_rq, se, 0);
12728 detach_entity_load_avg(cfs_rq, se);
12729 update_tg_load_avg(cfs_rq);
12735 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12737 /* Synchronize entity with its cfs_rq */
12738 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
12739 attach_entity_load_avg(cfs_rq, se);
12740 update_tg_load_avg(cfs_rq);
12782 * This routine is mostly called to set cfs_rq->curr field when a task
12800 struct cfs_rq *cfs_rq = cfs_rq_of(se);
12802 set_next_entity(cfs_rq, se);
12803 /* ensure bandwidth has been allocated on our new cfs_rq */
12804 account_cfs_rq_runtime(cfs_rq, 0);
12808 void init_cfs_rq(struct cfs_rq *cfs_rq)
12810 cfs_rq->tasks_timeline = RB_ROOT_CACHED;
12811 u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
12813 raw_spin_lock_init(&cfs_rq->removed.lock);
12830 /* Mark that se's cfs_rq has been changed -- migrated */
12842 if (tg->cfs_rq)
12843 kfree(tg->cfs_rq[i]);
12848 kfree(tg->cfs_rq);
12855 struct cfs_rq *cfs_rq;
12858 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
12859 if (!tg->cfs_rq)
12870 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
12872 if (!cfs_rq)
12880 init_cfs_rq(cfs_rq);
12881 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
12888 kfree(cfs_rq);
12927 if (!tg->cfs_rq[cpu]->on_list)
12933 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
12938 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
12944 cfs_rq->tg = tg;
12945 cfs_rq->rq = rq;
12946 init_cfs_rq_runtime(cfs_rq);
12948 tg->cfs_rq[cpu] = cfs_rq;
12956 se->cfs_rq = &rq->cfs;
12959 se->cfs_rq = parent->my_q;
12963 se->my_q = cfs_rq;
13043 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
13068 struct cfs_rq *cfs_rq = cfs_rq_of(se);
13073 cfs_rq->idle_h_nr_running += idle_task_delta;
13076 if (cfs_rq_is_idle(cfs_rq))
13168 struct cfs_rq *cfs_rq, *pos;
13171 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
13172 print_cfs_rq(m, cpu, cfs_rq);