Lines matching refs:sgs (references to struct sg_lb_stats in kernel/sched/fair.c)

9679 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9681 if (sgs->sum_nr_running < sgs->group_weight)
9684 if ((sgs->group_capacity * imbalance_pct) <
9685 (sgs->group_runnable * 100))
9688 if ((sgs->group_capacity * 100) >
9689 (sgs->group_util * imbalance_pct))
9704 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
9706 if (sgs->sum_nr_running <= sgs->group_weight)
9709 if ((sgs->group_capacity * 100) <
9710 (sgs->group_util * imbalance_pct))
9713 if ((sgs->group_capacity * imbalance_pct) <
9714 (sgs->group_runnable * 100))
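The matches at 9679-9689 and 9704-9714 are the two capacity checks that feed group classification: group_has_capacity() asks whether a group can still take work, group_is_overloaded() asks whether it has more work than it can serve. A sketch of how the matched conditions fit together; the return statements and anything between the matched lines are assumptions based on the visible conditions:

static inline bool
group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
        /* Fewer running tasks than CPUs: room by definition. */
        if (sgs->sum_nr_running < sgs->group_weight)
                return true;

        /* Runnable pressure already exceeds capacity (scaled by imbalance_pct). */
        if ((sgs->group_capacity * imbalance_pct) <
            (sgs->group_runnable * 100))
                return false;

        /* Utilization still clearly below capacity. */
        if ((sgs->group_capacity * 100) >
            (sgs->group_util * imbalance_pct))
                return true;

        return false;
}

static inline bool
group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
{
        /* At most one task per CPU can never be overloaded. */
        if (sgs->sum_nr_running <= sgs->group_weight)
                return false;

        /* Utilization exceeds capacity (scaled by imbalance_pct). */
        if ((sgs->group_capacity * 100) <
            (sgs->group_util * imbalance_pct))
                return true;

        /* Runnable pressure exceeds capacity. */
        if ((sgs->group_capacity * imbalance_pct) <
            (sgs->group_runnable * 100))
                return true;

        return false;
}

Note that the two helpers are not exact complements, so a group can be neither "has capacity" nor "overloaded"; that middle band is what group_classify() reports as group_fully_busy.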
9723 struct sg_lb_stats *sgs)
9725 if (group_is_overloaded(imbalance_pct, sgs))
9731 if (sgs->group_asym_packing)
9734 if (sgs->group_smt_balance)
9737 if (sgs->group_misfit_task_load)
9740 if (!group_has_capacity(imbalance_pct, sgs))
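The matches at 9723-9740 are the decision ladder of group_classify(); the order of the checks defines the precedence of the group_type values. A sketch with the return values filled in; the sg_imbalanced() check does not reference sgs and therefore does not appear among the matches, so treat its placement as an assumption:

static inline enum group_type
group_classify(unsigned int imbalance_pct, struct sched_group *group,
               struct sg_lb_stats *sgs)
{
        if (group_is_overloaded(imbalance_pct, sgs))
                return group_overloaded;

        /* Assumed from context: the group-imbalance flag is checked here. */
        if (sg_imbalanced(group))
                return group_imbalanced;

        if (sgs->group_asym_packing)
                return group_asym_packing;

        if (sgs->group_smt_balance)
                return group_smt_balance;

        if (sgs->group_misfit_task_load)
                return group_misfit_task;

        if (!group_has_capacity(imbalance_pct, sgs))
                return group_fully_busy;

        return group_has_spare;
}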
9781 * @sgs: Load-balancing statistics of the candidate busiest group
9791 sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
9798 (sgs->group_weight - sgs->idle_cpus != 1))
9815 static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
9828 sgs->sum_h_nr_running > 1)
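The matches at 9781-9828 come from the two helpers that mark a remote group for asymmetric packing (sched_group_asym()) or SMT balancing (smt_balance()); both flags are consumed at 9955-9961. A sketch; the SD_SHARE_CPUCAPACITY guards follow the matched conditions, while the final priority comparison in sched_group_asym() is an assumption about the surrounding code:

static inline bool
sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
{
        /*
         * CPU priorities do not make sense for SMT cores with more than one
         * busy sibling.
         */
        if ((group->flags & SD_SHARE_CPUCAPACITY) &&
            (sgs->group_weight - sgs->idle_cpus != 1))
                return false;

        /* Assumption: fall through to the CPU-priority comparison. */
        return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
}

static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
                               struct sched_group *group)
{
        if (env->idle == CPU_NOT_IDLE)
                return false;

        /*
         * An SMT group with more than one busy sibling is worth pulling
         * from: moving one task to an idle destination frees a whole core.
         */
        if (group->flags & SD_SHARE_CPUCAPACITY &&
            sgs->sum_h_nr_running > 1)
                return true;

        return false;
}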
9887 * @sgs: variable to hold the statistics for this group.
9893 struct sg_lb_stats *sgs,
9898 memset(sgs, 0, sizeof(*sgs));
9906 sgs->group_load += load;
9907 sgs->group_util += cpu_util_cfs(i);
9908 sgs->group_runnable += cpu_runnable(rq);
9909 sgs->sum_h_nr_running += rq->cfs.h_nr_running;
9912 sgs->sum_nr_running += nr_running;
9921 sgs->nr_numa_running += rq->nr_numa_running;
9922 sgs->nr_preferred_running += rq->nr_preferred_running;
9928 sgs->idle_cpus++;
9938 if (sgs->group_misfit_task_load < rq->misfit_task_load) {
9939 sgs->group_misfit_task_load = rq->misfit_task_load;
9945 if (sgs->group_misfit_task_load < load)
9946 sgs->group_misfit_task_load = load;
9950 sgs->group_capacity = group->sgc->capacity;
9952 sgs->group_weight = group->group_weight;
9955 if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
9956 sched_group_asym(env, sgs, group))
9957 sgs->group_asym_packing = 1;
9960 if (!local_group && smt_balance(env, sgs, group))
9961 sgs->group_smt_balance = 1;
9963 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
9966 if (sgs->group_type == group_overloaded)
9967 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
9968 sgs->group_capacity;
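The matches at 9887-9968 form the core of update_sg_lb_stats(): a per-CPU accumulation loop followed by group-wide post-processing. A condensed sketch; the loop scaffolding, the overload/overutilized flag bookkeeping and the NUMA counters (9921-9922) are assumptions or marked as elided, since only the sgs-touching lines appear among the matches:

static inline void update_sg_lb_stats(struct lb_env *env,
                                      struct sd_lb_stats *sds,
                                      struct sched_group *group,
                                      struct sg_lb_stats *sgs,
                                      int *sg_status)
{
        int i, nr_running, local_group;

        memset(sgs, 0, sizeof(*sgs));

        local_group = group == sds->local;

        for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                struct rq *rq = cpu_rq(i);
                unsigned long load = cpu_load(rq);

                sgs->group_load += load;
                sgs->group_util += cpu_util_cfs(i);
                sgs->group_runnable += cpu_runnable(rq);
                sgs->sum_h_nr_running += rq->cfs.h_nr_running;

                nr_running = rq->nr_running;
                sgs->sum_nr_running += nr_running;

                /* (overload/overutilized flags and NUMA counters elided) */

                if (!nr_running && idle_cpu(i)) {
                        sgs->idle_cpus++;
                        /* An idle CPU cannot carry a misfit task. */
                        continue;
                }

                if (local_group)
                        continue;

                if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
                        /* Remember the largest misfit task in the group. */
                        if (sgs->group_misfit_task_load < rq->misfit_task_load)
                                sgs->group_misfit_task_load = rq->misfit_task_load;
                } else if (env->idle != CPU_NOT_IDLE &&
                           sched_reduced_capacity(rq, env->sd)) {
                        /* A task on a capacity-reduced CPU counts as misfit too. */
                        if (sgs->group_misfit_task_load < load)
                                sgs->group_misfit_task_load = load;
                }
        }

        sgs->group_capacity = group->sgc->capacity;
        sgs->group_weight = group->group_weight;

        /* The asym-packing and SMT-balance flags only matter for remote groups. */
        if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
            sched_group_asym(env, sgs, group))
                sgs->group_asym_packing = 1;

        if (!local_group && smt_balance(env, sgs, group))
                sgs->group_smt_balance = 1;

        sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);

        /* avg_load is only meaningful once a group is overloaded. */
        if (sgs->group_type == group_overloaded)
                sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
                                sgs->group_capacity;
}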
9976 * @sgs: sched_group statistics
9987 struct sg_lb_stats *sgs)
9992 if (!sgs->sum_h_nr_running)
10002 (sgs->group_type == group_misfit_task) &&
10007 if (sgs->group_type > busiest->group_type)
10010 if (sgs->group_type < busiest->group_type)
10018 switch (sgs->group_type) {
10021 return sgs->avg_load > busiest->avg_load;
10039 return sgs->group_misfit_task_load > busiest->group_misfit_task_load;
10046 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
10064 if (sgs->avg_load < busiest->avg_load)
10067 if (sgs->avg_load == busiest->avg_load) {
10085 if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
10099 if (sgs->idle_cpus > busiest->idle_cpus)
10101 else if ((sgs->idle_cpus == busiest->idle_cpus) &&
10102 (sgs->sum_nr_running <= busiest->sum_nr_running))
10115 (sgs->group_type <= group_fully_busy) &&
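The matches at 9976-10115 belong to update_sd_pick_busiest(), which decides whether a candidate group's statistics should replace the current busiest. The sketch below keeps the top-level precedence and a few representative tie-breaks; the remaining cases (imbalanced, asym packing, SMT balance, fully busy) and the asymmetric-capacity filters are only summarized in comments, so read it as an outline rather than the full function:

static bool update_sd_pick_busiest(struct lb_env *env,
                                   struct sd_lb_stats *sds,
                                   struct sched_group *sg,
                                   struct sg_lb_stats *sgs)
{
        struct sg_lb_stats *busiest = &sds->busiest_stat;

        /* Make sure there is at least one task to pull. */
        if (!sgs->sum_h_nr_running)
                return false;

        /*
         * (Elided: on SD_ASYM_CPUCAPACITY domains, a group_misfit_task
         * candidate the destination CPU cannot help is rejected here.)
         */

        /* A higher group_type always wins; a lower one always loses. */
        if (sgs->group_type > busiest->group_type)
                return true;

        if (sgs->group_type < busiest->group_type)
                return false;

        /* Same type: break the tie according to that type. */
        switch (sgs->group_type) {
        case group_overloaded:
                /* Pick the overloaded group with the highest avg_load. */
                return sgs->avg_load > busiest->avg_load;

        case group_misfit_task:
                /* Pick the group carrying the biggest misfit task. */
                return sgs->group_misfit_task_load > busiest->group_misfit_task_load;

        case group_has_spare:
                /* Prefer fewer idle CPUs, then more tasks. */
                if (sgs->idle_cpus > busiest->idle_cpus)
                        return false;
                if (sgs->idle_cpus == busiest->idle_cpus &&
                    sgs->sum_nr_running <= busiest->sum_nr_running)
                        return false;
                break;

        default:
                /*
                 * group_imbalanced, group_asym_packing, group_smt_balance and
                 * group_fully_busy use further tie-breaks not reproduced here
                 * (first-come imbalanced group, asym_prefer_cpu priority, the
                 * SMT idle-CPU checks at 10046 and 10085, avg_load at 10064).
                 */
                break;
        }

        /*
         * (Elided: on asym-capacity domains, a candidate that is at most
         * fully busy but sits on lower-capacity CPUs than the destination
         * is also rejected; see the match at 10115.)
         */
        return true;
}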
10123 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
10125 if (sgs->sum_h_nr_running > sgs->nr_numa_running)
10127 if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
10141 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
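The two definitions at 10123 and 10141 are the NUMA-balancing and stub variants of fbq_classify_group(), which translates the group statistics into an fbq_type for find_busiest_queue(). A sketch, assuming the usual regular/remote/all classification:

#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
        /* Some tasks have no NUMA placement at all. */
        if (sgs->sum_h_nr_running > sgs->nr_numa_running)
                return regular;
        /* NUMA tasks exist, but some run away from their preferred node. */
        if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
                return remote;
        /* Every task runs on its preferred node. */
        return all;
}
#else
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
        return all;
}
#endif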
10201 * @sgs: variable to hold the statistics for this group.
10206 struct sg_lb_stats *sgs,
10211 memset(sgs, 0, sizeof(*sgs));
10215 sgs->group_misfit_task_load = 1;
10221 sgs->group_load += cpu_load_without(rq, p);
10222 sgs->group_util += cpu_util_without(i, p);
10223 sgs->group_runnable += cpu_runnable_without(rq, p);
10225 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
10228 sgs->sum_nr_running += nr_running;
10234 sgs->idle_cpus++;
10238 sgs->group_misfit_task_load &&
10240 sgs->group_misfit_task_load = 0;
10244 sgs->group_capacity = group->sgc->capacity;
10246 sgs->group_weight = group->group_weight;
10248 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
10254 if (sgs->group_type == group_fully_busy ||
10255 sgs->group_type == group_overloaded)
10256 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
10257 sgs->group_capacity;
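The matches at 10201-10257 form update_sg_wakeup_stats(), the wakeup-path counterpart of update_sg_lb_stats(): the same statistics, but computed "without" the waking task p so each group is judged as if p had already left it. A sketch; the loop scaffolding and the idle_cpu_without()/task_fits_cpu() guards are assumptions wrapped around the matched lines:

static inline void update_sg_wakeup_stats(struct sched_domain *sd,
                                          struct sched_group *group,
                                          struct sg_lb_stats *sgs,
                                          struct task_struct *p)
{
        int i, nr_running;

        memset(sgs, 0, sizeof(*sgs));

        /* Assume the task fits nowhere until a fitting CPU is found. */
        if (sd->flags & SD_ASYM_CPUCAPACITY)
                sgs->group_misfit_task_load = 1;

        for_each_cpu(i, sched_group_span(group)) {
                struct rq *rq = cpu_rq(i);
                unsigned int local = task_running_on_cpu(i, p);

                /* All contributions are taken "without" p's own footprint. */
                sgs->group_load += cpu_load_without(rq, p);
                sgs->group_util += cpu_util_without(i, p);
                sgs->group_runnable += cpu_runnable_without(rq, p);
                sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;

                nr_running = rq->nr_running - local;
                sgs->sum_nr_running += nr_running;

                /* No need to call idle_cpu_without() if nr_running is not 0. */
                if (!nr_running && idle_cpu_without(i, p))
                        sgs->idle_cpus++;

                /* One CPU that fits p is enough to clear the misfit mark. */
                if (sd->flags & SD_ASYM_CPUCAPACITY &&
                    sgs->group_misfit_task_load &&
                    task_fits_cpu(p, i))
                        sgs->group_misfit_task_load = 0;
        }

        sgs->group_capacity = group->sgc->capacity;
        sgs->group_weight = group->group_weight;
        sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);

        /* avg_load only makes sense for fully busy or overloaded groups. */
        if (sgs->group_type == group_fully_busy ||
            sgs->group_type == group_overloaded)
                sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
                                sgs->group_capacity;
}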
10263 struct sg_lb_stats *sgs)
10265 if (sgs->group_type < idlest_sgs->group_type)
10268 if (sgs->group_type > idlest_sgs->group_type)
10276 switch (sgs->group_type) {
10280 if (idlest_sgs->avg_load <= sgs->avg_load)
10298 if (idlest_sgs->idle_cpus > sgs->idle_cpus)
10302 if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
10303 idlest_sgs->group_util <= sgs->group_util)
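The matches at 10263-10303 are update_pick_idlest(), the wakeup-path mirror of update_sd_pick_busiest(): a lower group_type wins outright, and per-type tie-breaks then pick the idlest candidate. A sketch; the group_misfit_task branch does not touch sgs beyond the type comparison, so its max_capacity tie-break is an assumption:

static bool update_pick_idlest(struct sched_group *idlest,
                               struct sg_lb_stats *idlest_sgs,
                               struct sched_group *group,
                               struct sg_lb_stats *sgs)
{
        if (sgs->group_type < idlest_sgs->group_type)
                return true;

        if (sgs->group_type > idlest_sgs->group_type)
                return false;

        /* Same type: break the tie according to that type. */
        switch (sgs->group_type) {
        case group_overloaded:
        case group_fully_busy:
                /* Pick the group with the lowest avg_load. */
                if (idlest_sgs->avg_load <= sgs->avg_load)
                        return false;
                break;

        case group_imbalanced:
        case group_asym_packing:
        case group_smt_balance:
                /* These types are not used in the slow wakeup path. */
                return false;

        case group_misfit_task:
                /* Assumption: pick the group with the highest max capacity. */
                if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
                        return false;
                break;

        case group_has_spare:
                /* Pick the group with the most idle CPUs... */
                if (idlest_sgs->idle_cpus > sgs->idle_cpus)
                        return false;

                /* ...and, on a tie, the one with the lowest utilization. */
                if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
                    idlest_sgs->group_util <= sgs->group_util)
                        return false;
                break;
        }

        return true;
}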
10323 struct sg_lb_stats *sgs;
10346 sgs = &local_sgs;
10349 sgs = &tmp_sgs;
10352 update_sg_wakeup_stats(sd, group, sgs, p);
10354 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
10356 idlest_sgs = *sgs;
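The matches at 10323-10356 are the caller side: find_idlest_group() walks the domain's groups, fills local_sgs or tmp_sgs via update_sg_wakeup_stats(), and remembers the idlest remote group. A compressed sketch of that loop; the allowed-CPU filter at the top of the loop body and the post-loop choice between the local and the idlest group are left out:

        /* Inside find_idlest_group(sd, p, this_cpu). */
        do {
                struct sg_lb_stats *sgs;
                int local_group = cpumask_test_cpu(this_cpu,
                                                   sched_group_span(group));

                if (local_group) {
                        sgs = &local_sgs;
                        local = group;
                } else {
                        sgs = &tmp_sgs;
                }

                update_sg_wakeup_stats(sd, group, sgs, p);

                /* Remember the idlest remote group seen so far. */
                if (!local_group &&
                    update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
                        idlest = group;
                        idlest_sgs = *sgs;
                }

                group = group->next;
        } while (group != sd->groups);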
10570 struct sg_lb_stats *sgs = &tmp_sgs;
10576 sgs = local;
10583 update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
10585 if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
10587 sds->busiest_stat = *sgs;
10591 sds->total_load += sgs->group_load;
10592 sds->total_capacity += sgs->group_capacity;
10594 sum_util += sgs->group_util;
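The matches at 10570-10594 are the group-iteration loop of update_sd_lb_stats(), the load-balancing caller that ties everything above together. A sketch of the loop only; the declarations around it, the capacity-refresh condition and the post-loop prefer_sibling/overload/overutilized handling are assumptions or left out:

        /* Inside update_sd_lb_stats(env, sds): walk every group of the domain. */
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats *local = &sds->local_stat;
        struct sg_lb_stats tmp_sgs;
        unsigned long sum_util = 0;
        int sg_status = 0;

        do {
                struct sg_lb_stats *sgs = &tmp_sgs;
                int local_group;

                local_group = cpumask_test_cpu(env->dst_cpu,
                                               sched_group_span(sg));
                if (local_group) {
                        sds->local = sg;
                        sgs = local;    /* the local group's stats live in sds */

                        /* Assumption: refresh the group capacity when stale. */
                        if (env->idle != CPU_NEWLY_IDLE ||
                            time_after_eq(jiffies, sg->sgc->next_update))
                                update_group_capacity(env->sd, env->dst_cpu);
                }

                update_sg_lb_stats(env, sds, sg, sgs, &sg_status);

                /* Only remote groups compete for "busiest". */
                if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
                        sds->busiest_stat = *sgs;
                }

                /* Domain-wide totals feed the later imbalance calculation. */
                sds->total_load += sgs->group_load;
                sds->total_capacity += sgs->group_capacity;

                sum_util += sgs->group_util;
                sg = sg->next;
        } while (sg != env->sd->groups);

        /* (prefer_sibling busiest adjustment, root-domain overload and
           overutilized updates elided) */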