Matches restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/

Lines Matching defs:sds

2105  * @sds: Variable containing the statistics for sd.
2109 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2116 sds->power_savings_balance = 0;
2118 sds->power_savings_balance = 1;
2119 sds->min_nr_running = ULONG_MAX;
2120 sds->leader_nr_running = 0;
2129 * @sds: Variable containing the statistics of the sched_domain
2135 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2138 if (!sds->power_savings_balance)
2145 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2146 !sds->this_nr_running))
2147 sds->power_savings_balance = 0;
2153 if (!sds->power_savings_balance ||
2163 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2164 (sgs->sum_nr_running == sds->min_nr_running &&
2165 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2166 sds->group_min = group;
2167 sds->min_nr_running = sgs->sum_nr_running;
2168 sds->min_load_per_task = sgs->sum_weighted_load /
2180 if (sgs->sum_nr_running > sds->leader_nr_running ||
2181 (sgs->sum_nr_running == sds->leader_nr_running &&
2182 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2183 sds->group_leader = group;
2184 sds->leader_nr_running = sgs->sum_nr_running;
2190 * @sds: Variable containing the statistics of the sched_domain
2203 static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2206 if (!sds->power_savings_balance)
2209 if (sds->this != sds->group_leader ||
2210 sds->group_leader == sds->group_min)
2213 *imbalance = sds->min_load_per_task;
2214 sds->busiest = sds->group_min;
2221 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2227 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2232 static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
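
The matches from 2105 through 2232 all belong to the CONFIG_SCHED_MC/CONFIG_SCHED_SMT power-savings side of the balancer (the declarations at 2221-2232 appear to be the empty stubs built when power-savings balancing is not configured): group_min tracks the non-idle group with the fewest tasks, group_leader tracks the group that could absorb them, and check_power_save_busiest_group() nominates group_min as the "busiest" group so an otherwise balanced domain can still be consolidated for power. Collecting every field that the matched lines (here and further down) dereference through sds suggests roughly the following shape for struct sd_lb_stats. This is a sketch reconstructed from the matches only, not the verbatim 2.6.36 definition; member order, exact types and any members that never match defs:sds are guesses.

struct sd_lb_stats {
        struct sched_group *busiest;       /* busiest group in this sched_domain */
        struct sched_group *this;          /* group containing this_cpu */
        unsigned long total_load;          /* sum of group loads in the domain */
        unsigned long total_pwr;           /* sum of group cpu_power in the domain */
        unsigned long avg_load;            /* domain-wide average load */

        /* statistics of the local group */
        unsigned long this_load;
        unsigned long this_load_per_task;
        unsigned long this_nr_running;

        /* statistics of the busiest group */
        unsigned long max_load;
        unsigned long busiest_load_per_task;
        unsigned long busiest_nr_running;
        unsigned long busiest_group_capacity;
        int group_imb;                     /* busiest group is internally imbalanced */

        /* power-savings bookkeeping (only with MC/SMT power savings built in) */
        int power_savings_balance;         /* can we consolidate for power? */
        struct sched_group *group_min;     /* non-idle group with fewest tasks */
        struct sched_group *group_leader;  /* group that could absorb group_min */
        unsigned long min_load_per_task;
        unsigned long min_nr_running;
        unsigned long leader_nr_running;
};
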
2462 * @sds: sched_domain statistics
2471 struct sd_lb_stats *sds,
2476 if (sgs->avg_load <= sds->max_load)
2492 if (!sds->busiest)
2495 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2510 * @sds: variable to hold the statistics for this sched_domain.
2515 struct sd_lb_stats *sds)
2525 init_sd_power_savings_stats(sd, sds, idle);
2539 sds->total_load += sgs.group_load;
2540 sds->total_pwr += sg->cpu_power;
2551 sds->this_load = sgs.avg_load;
2552 sds->this = sg;
2553 sds->this_nr_running = sgs.sum_nr_running;
2554 sds->this_load_per_task = sgs.sum_weighted_load;
2555 } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
2556 sds->max_load = sgs.avg_load;
2557 sds->busiest = sg;
2558 sds->busiest_nr_running = sgs.sum_nr_running;
2559 sds->busiest_group_capacity = sgs.group_capacity;
2560 sds->busiest_load_per_task = sgs.sum_weighted_load;
2561 sds->group_imb = sgs.group_imb;
2564 update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2595 * @sds: Statistics of the sched_domain which is to be packed
2600 struct sd_lb_stats *sds,
2608 if (!sds->busiest)
2611 busiest_cpu = group_first_cpu(sds->busiest);
2615 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
2624 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2628 static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2635 if (sds->this_nr_running) {
2636 sds->this_load_per_task /= sds->this_nr_running;
2637 if (sds->busiest_load_per_task >
2638 sds->this_load_per_task)
2641 sds->this_load_per_task =
2644 scaled_busy_load_per_task = sds->busiest_load_per_task
2646 scaled_busy_load_per_task /= sds->busiest->cpu_power;
2648 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
2650 *imbalance = sds->busiest_load_per_task;
2660 pwr_now += sds->busiest->cpu_power *
2661 min(sds->busiest_load_per_task, sds->max_load);
2662 pwr_now += sds->this->cpu_power *
2663 min(sds->this_load_per_task, sds->this_load);
2667 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2668 sds->busiest->cpu_power;
2669 if (sds->max_load > tmp)
2670 pwr_move += sds->busiest->cpu_power *
2671 min(sds->busiest_load_per_task, sds->max_load - tmp);
2674 if (sds->max_load * sds->busiest->cpu_power <
2675 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
2676 tmp = (sds->max_load * sds->busiest->cpu_power) /
2677 sds->this->cpu_power;
2679 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2680 sds->this->cpu_power;
2681 pwr_move += sds->this->cpu_power *
2682 min(sds->this_load_per_task, sds->this_load + tmp);
2687 *imbalance = sds->busiest_load_per_task;
2693 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
2697 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
2702 sds->busiest_load_per_task /= sds->busiest_nr_running;
2703 if (sds->group_imb) {
2704 sds->busiest_load_per_task =
2705 min(sds->busiest_load_per_task, sds->avg_load);
2713 if (sds->max_load < sds->avg_load) {
2715 return fix_small_imbalance(sds, this_cpu, imbalance);
2718 if (!sds->group_imb) {
2722 load_above_capacity = (sds->busiest_nr_running -
2723 sds->busiest_group_capacity);
2727 load_above_capacity /= sds->busiest->cpu_power;
2740 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
2743 *imbalance = min(max_pull * sds->busiest->cpu_power,
2744 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
2753 if (*imbalance < sds->busiest_load_per_task)
2754 return fix_small_imbalance(sds, this_cpu, imbalance);
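
Joining matches 2740-2744 (the trailing division by SCHED_LOAD_SCALE sits on the following, non-matching line of the source and is recalled here rather than shown by the search), the imbalance for the ordinary case works out to:

        max_pull   = min(sds->max_load - sds->avg_load, load_above_capacity);
        *imbalance = min(max_pull * sds->busiest->cpu_power,
                         (sds->avg_load - sds->this_load) * sds->this->cpu_power)
                     / SCHED_LOAD_SCALE;

In words: never pull more than the busiest group's excess over the domain average (further capped by its load above capacity), and never more than the local group can absorb before it, too, rises past the average; anything smaller than one task then falls through to fix_small_imbalance(), per the matches at 2753-2754.
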
2789 struct sd_lb_stats sds;
2791 memset(&sds, 0, sizeof(sds));
2798 balance, &sds);
2813 check_asym_packing(sd, &sds, this_cpu, imbalance))
2814 return sds.busiest;
2816 if (!sds.busiest || sds.busiest_nr_running == 0)
2819 if (sds.this_load >= sds.max_load)
2822 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
2824 if (sds.this_load >= sds.avg_load)
2827 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
2831 calculate_imbalance(&sds, this_cpu, imbalance);
2832 return sds.busiest;
2839 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
2840 return sds.busiest;
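
Finally, matches 2789-2840 trace the decision sequence in find_busiest_group(). A body-only, paraphrased skeleton follows; the function's parameter list, its labels and the early-exit checks that never reference sds are abridged, and the closing *imbalance = 0 / return NULL fallback is recalled rather than matched, so this is an outline of the visible checks rather than the verbatim source:

        struct sd_lb_stats sds;

        memset(&sds, 0, sizeof(sds));

        /* one pass over the groups fills in this/busiest/totals (arg list abridged) */
        update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);

        if (check_asym_packing(sd, &sds, this_cpu, imbalance))
                return sds.busiest;         /* asym packing picked a target */

        if (!sds.busiest || sds.busiest_nr_running == 0)
                goto out_balanced;

        if (sds.this_load >= sds.max_load)
                goto out_balanced;          /* we carry at least as much as the busiest */

        sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;

        if (sds.this_load >= sds.avg_load)
                goto out_balanced;          /* already at or above the domain average */

        if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
                goto out_balanced;          /* gap is within imbalance_pct, not worth it */

        calculate_imbalance(&sds, this_cpu, imbalance);
        return sds.busiest;

out_balanced:
        if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
                return sds.busiest;         /* power savings: pull group_min's tasks anyway */

        *imbalance = 0;
        return NULL;
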