Searched refs: busiest (Results 1 - 2 of 2), sorted by relevance
/linux-master/kernel/sched/
fair.c

   8736    * first so the group_type can simply be compared when selecting the busiest
   9475   struct sched_group *busiest; /* Busiest group in this sd */   [member in struct sd_lb_stats]
   9482   struct sg_lb_stats busiest_stat; /* Statistics of the busiest group */
   9492    * busiest_stat::idle_cpus to the worst busiest group because
   9496   .busiest = NULL,
   9651    * When this is so detected; this group becomes a candidate for busiest; see
   9781    * @sgs: Load-balancing statistics of the candidate busiest group
   9782    * @group: The candidate busiest group
   9836   struct sg_lb_stats *busiest,
   9842   if (env->idle == CPU_NOT_IDLE || !busiest
   9834   sibling_imbalance(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *busiest, struct sg_lb_stats *local)   [argument]
   9989   struct sg_lb_stats *busiest = &sds->busiest_stat;   [local]
  10637   struct sg_lb_stats *local, *busiest;   [local]
  10839   struct sg_lb_stats *local, *busiest;   [local]
  10981   struct rq *busiest = NULL, *rq;   [local]
  11266   struct rq *busiest;   [local]
  11467   active_load_balance_cpu_stop, busiest,   [local]
  [all...]
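The fair.c matches all come from the load balancer's statistics pass: update_sd_lb_stats() fills an sd_lb_stats with one sg_lb_stats per sched_group, and the group that ranks worst is recorded in sds->busiest / sds->busiest_stat. The comment at 8736 points at the key design choice: group_type is an ordered enum, so candidate groups can be compared directly. Below is a minimal userspace sketch of that selection idea; the types and pick_busiest() are deliberately reduced stand-ins, not fair.c's real definitions.

#include <stdio.h>
#include <stddef.h>

/* Reduced stand-ins for the kernel's group_type / sg_lb_stats /
 * sd_lb_stats; names follow fair.c but the contents are simplified. */
enum group_type {
	group_has_spare,	/* ordered so that a numerically bigger   */
	group_fully_busy,	/* value means "busier", which is what    */
	group_overloaded,	/* lets the types be compared directly    */
};

struct sg_lb_stats {
	enum group_type group_type;
	unsigned long avg_load;
	unsigned int idle_cpus;
};

struct sd_lb_stats {
	struct sg_lb_stats *busiest;		/* busiest group seen so far */
	struct sg_lb_stats busiest_stat;	/* its statistics */
};

/* Hypothetical reduction of the update_sd_pick_busiest() idea: should
 * @sgs replace the current busiest candidate? */
static int pick_busiest(struct sd_lb_stats *sds, struct sg_lb_stats *sgs)
{
	struct sg_lb_stats *busiest = &sds->busiest_stat;

	if (sgs->group_type > busiest->group_type)
		return 1;
	if (sgs->group_type < busiest->group_type)
		return 0;
	/* same class: fall through to a finer tie-breaker */
	return sgs->avg_load > busiest->avg_load;
}

int main(void)
{
	struct sd_lb_stats sds = {
		.busiest = NULL,
		.busiest_stat = { .group_type = group_has_spare },
	};
	struct sg_lb_stats groups[] = {
		{ group_fully_busy, 700, 0 },
		{ group_overloaded, 900, 0 },
		{ group_overloaded, 800, 1 },
	};

	for (size_t i = 0; i < 3; i++) {
		if (pick_busiest(&sds, &groups[i])) {
			sds.busiest = &groups[i];
			sds.busiest_stat = groups[i];
		}
	}
	printf("busiest avg_load=%lu\n", sds.busiest->avg_load);
	return 0;
}

Ordering the enum this way means the expensive tie-breakers (average load, idle CPU counts, and so on) only have to be consulted when two groups fall into the same class.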
sched.h

   2710   static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
   2712       __acquires(busiest->lock)
   2716       double_rq_lock(this_rq, busiest);
   2729   static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
   2731       __acquires(busiest->lock)
   2734       if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
   2735           likely(raw_spin_rq_trylock(busiest))) {
   2736           double_rq_clock_clear_update(this_rq, busiest);
   2740       if (rq_order_less(this_rq, busiest)) {
   2741           raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
   2757   double_lock_balance(struct rq *this_rq, struct rq *busiest)   [argument]
   2768   raw_spin_rq_unlock(busiest);   [variable]
   [all...]
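Both _double_lock_balance() definitions solve the same problem: the caller already holds this_rq->lock and needs busiest->lock as well, so naively blocking on the second lock risks an ABBA deadlock against a balancer running in the opposite direction. The variant at 2710 unconditionally retakes both locks via double_rq_lock(); the variant at 2729 trylocks first and only falls back to the agreed global ordering (rq_order_less()) on contention. Below is a minimal pthread sketch of that trylock-then-order shape, with a hypothetical rq struct rather than the kernel's API.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a runqueue: an id plus its lock.  The kernel
 * orders rq locks via rq_order_less(); here lower cpu id goes first. */
struct rq {
	int cpu;
	pthread_mutex_t lock;
};

static int rq_order_less(struct rq *a, struct rq *b)
{
	return a->cpu < b->cpu;
}

/* Sketch of the trylock-then-order pattern.  Precondition: the caller
 * holds this_rq->lock.  Mirroring the kernel's convention, returns 1 if
 * that lock was dropped and retaken (so the caller must revalidate any
 * state read under it), 0 if it was never released. */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	/* same lock, or uncontended fast path */
	if (this_rq == busiest || pthread_mutex_trylock(&busiest->lock) == 0)
		return 0;

	if (rq_order_less(this_rq, busiest)) {
		/* We already hold the lower-ordered lock, so blocking on
		 * the higher-ordered one cannot deadlock. */
		pthread_mutex_lock(&busiest->lock);
		return 0;
	}

	/* Wrong order: drop ours and take both in the agreed order. */
	pthread_mutex_unlock(&this_rq->lock);
	pthread_mutex_lock(&busiest->lock);
	pthread_mutex_lock(&this_rq->lock);
	return 1;
}

static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (this_rq != busiest)
		pthread_mutex_unlock(&busiest->lock);
}

int main(void)
{
	struct rq a = { 0, PTHREAD_MUTEX_INITIALIZER };
	struct rq b = { 1, PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&a.lock);		/* caller's precondition */
	int dropped = double_lock_balance(&a, &b);
	printf("both locked, dropped=%d\n", dropped);
	double_unlock_balance(&a, &b);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

The rq_order_less() branch is where the trylock variant saves work: when the two locks happen to be requested in global order anyway, the second can be taken without ever releasing the first.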
Completed in 553 milliseconds