Lines matching defs:this_cpu (all hits are in kernel/sched/fair.c)

6017 int this_cpu = smp_processor_id();
6058 if (cpu_of(rq) != this_cpu) {
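Lines 6017/6058 are the common "am I on the owning CPU?" test: work aimed at a runqueue owned by another CPU is handed off rather than done in place (in this region of fair.c the handoff is typically a cross-CPU function call). A minimal sketch of the shape of that test; dispatch_rq_work(), handle_local() and defer_remote() are invented names, not kernel API:

    /*
     * Shape of the check at lines 6017/6058. owner_cpu stands in for
     * cpu_of(rq), cur_cpu for smp_processor_id(); both handlers are
     * hypothetical.
     */
    static void handle_local(int owner_cpu)  { (void)owner_cpu; /* touch the rq directly */ }
    static void defer_remote(int owner_cpu)  { (void)owner_cpu; /* queue a cross-CPU call */ }

    static void dispatch_rq_work(int owner_cpu, int cur_cpu)
    {
        if (owner_cpu != cur_cpu)
            defer_remote(owner_cpu);
        else
            handle_local(owner_cpu);
    }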
7055 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
7058 * If this_cpu is idle, it implies the wakeup is from interrupt
7069 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
7070 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
7072 if (sync && cpu_rq(this_cpu)->nr_running == 1)
7073 return this_cpu;
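Lines 7069-7073 are the whole of the idle-affinity heuristic: prefer an idle cache-sharing CPU, and treat a sync wakeup from an otherwise-empty runqueue as effectively idle. A minimal userspace model of that decision; the boolean inputs stand in for the kernel helpers available_idle_cpu() and cpus_share_cache() plus the nr_running read, and -1 stands in for nr_cpumask_bits ("no affine preference"):

    static int wake_affine_idle_model(int this_cpu, int prev_cpu, int sync,
                                      int this_idle, int prev_idle,
                                      int share_cache, int this_nr_running)
    {
        /* Interrupt-time wakeup on an idle CPU: stay cache-local if the
         * two CPUs share a cache, preferring an idle prev_cpu. */
        if (this_idle && share_cache)
            return prev_idle ? prev_cpu : this_cpu;

        /* Sync wakeup: the waker is the only runnable task and is about
         * to sleep, so this_cpu is effectively idle. */
        if (sync && this_nr_running == 1)
            return this_cpu;

        return -1; /* no affine preference */
    }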
7083 int this_cpu, int prev_cpu, int sync)
7088 this_eff_load = cpu_load(cpu_rq(this_cpu));
7094 return this_cpu;
7110 prev_eff_load *= capacity_of(this_cpu);
7121 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
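Line 7121 resolves the weighted comparison, and line 7110 is one half of the trick: multiplying each side's load by the *other* side's capacity compares the two load/capacity ratios without a division. A sketch of that verdict under made-up load and capacity numbers; nothing here is kernel API, and -1 again stands in for nr_cpumask_bits:

    /*
     * Model of the wake_affine_weight() verdict: test
     *   this_load / this_cap < prev_load / prev_cap
     * by cross-multiplying with the opposite capacity.
     */
    static int wake_affine_weight_model(long this_load, long this_cap,
                                        long prev_load, long prev_cap,
                                        int this_cpu)
    {
        long this_eff_load = this_load * prev_cap;
        long prev_eff_load = prev_load * this_cap;

        return this_eff_load < prev_eff_load ? this_cpu : -1;
    }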
7125 int this_cpu, int prev_cpu, int sync)
7130 target = wake_affine_idle(this_cpu, prev_cpu, sync);
7133 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
7136 if (target != this_cpu)
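Lines 7130-7136 chain the two heuristics: the cheap idle test runs first, the weighted test only if it abstained, and any verdict other than this_cpu leaves the task on prev_cpu. A composition of the two hypothetical models sketched above (it reuses their signatures, so it compiles alongside them):

    static int wake_affine_model(int this_cpu, int prev_cpu, int sync,
                                 int this_idle, int prev_idle,
                                 int share_cache, int this_nr_running,
                                 long this_load, long this_cap,
                                 long prev_load, long prev_cap)
    {
        int target = wake_affine_idle_model(this_cpu, prev_cpu, sync,
                                            this_idle, prev_idle,
                                            share_cache, this_nr_running);

        if (target < 0)
            target = wake_affine_weight_model(this_load, this_cap,
                                              prev_load, prev_cap,
                                              this_cpu);

        /* Line 7136: only an explicit this_cpu verdict migrates the
         * wakeup; anything else keeps the task on prev_cpu. */
        return target == this_cpu ? this_cpu : prev_cpu;
    }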
7145 sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
7151 sched_balance_find_dst_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
7156 int least_loaded_cpu = this_cpu;
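Line 7156 seeds the destination-CPU scan with this_cpu, so ties keep the task local. A sketch of that scan over a hypothetical per-CPU load array; the real sched_balance_find_dst_group_cpu() also tracks idle CPUs and respects p->cpus_ptr:

    static int least_loaded_cpu_model(const long *loads, int nr_cpus,
                                      int this_cpu)
    {
        long min_load = loads[this_cpu];
        int least_loaded_cpu = this_cpu; /* ties go to the local CPU */

        for (int cpu = 0; cpu < nr_cpus; cpu++) {
            if (loads[cpu] < min_load) {
                min_load = loads[cpu];
                least_loaded_cpu = cpu;
            }
        }
        return least_loaded_cpu;
    }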
8012 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
8983 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
10373 sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
10393 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
10396 local_group = cpumask_test_cpu(this_cpu,
10492 if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
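Two of the per-group tests above reduce to bit and ID comparisons: line 10396 asks whether this_cpu sits in the candidate group's span (making it the "local group"), and line 10492 compares this_cpu's node against the task's preferred NUMA node. A flat-bitmask sketch; group_span and cpu_node[] are hypothetical stand-ins for sched_group_span() and cpu_to_node(), and the mask assumes at most 64 CPUs:

    static int is_local_group(unsigned long group_span, int this_cpu)
    {
        return !!(group_span & (1UL << this_cpu));
    }

    static int prefers_this_node(const int *cpu_node, int this_cpu,
                                 int numa_preferred_nid)
    {
        return cpu_node[this_cpu] == numa_preferred_nid;
    }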
11301 * Check this_cpu to ensure it is balanced within domain. Attempt to move
11304 static int sched_balance_rq(int this_cpu, struct rq *this_rq,
11316 .dst_cpu = this_cpu,
11491 * moved to this_cpu:
11493 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
11498 /* Record that we found at least one task that could run on this_cpu */
11508 busiest->push_cpu = this_cpu;
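Line 11493 is the affinity gate on pulling the busiest CPU's running task: if the task's allowed mask excludes this_cpu it cannot move, otherwise line 11508 records this_cpu as the target the active-balance push will aim at. A sketch with a single word standing in for curr->cpus_ptr (so it assumes at most 64 CPUs); the struct and function names are invented:

    struct busiest_rq_model {
        unsigned long curr_cpus_allowed; /* stand-in for curr->cpus_ptr */
        int push_cpu;
    };

    /* Returns nonzero and arms the push when the running task may run
     * on this_cpu; zero means the task is pinned elsewhere. */
    static int setup_active_push(struct busiest_rq_model *busiest,
                                 int this_cpu)
    {
        if (!(busiest->curr_cpus_allowed & (1UL << this_cpu)))
            return 0; /* pinned: cannot be moved to this_cpu */

        busiest->push_cpu = this_cpu; /* push target, as at line 11508 */
        return 1;
    }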
12167 int this_cpu = this_rq->cpu;
12195 * Start with the next CPU after this_cpu so we will end with this_cpu and let a
12198 for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
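Line 12198 walks the nohz idle mask starting just after this_cpu and wrapping around, so the local CPU is visited last, exactly as the comment on line 12195 says. A userspace rendering of that wrap order over a plain bitmask (idle_mask is a hypothetical stand-in for nohz.idle_cpus_mask; assumes at most 64 CPUs):

    #include <stdio.h>

    static void walk_idle_cpus(unsigned long idle_mask, int nr_cpus,
                               int this_cpu)
    {
        for (int i = 1; i <= nr_cpus; i++) {
            /* start at this_cpu + 1, end on this_cpu itself */
            int balance_cpu = (this_cpu + i) % nr_cpus;

            if (idle_mask & (1UL << balance_cpu))
                printf("balance on behalf of cpu %d\n", balance_cpu);
        }
    }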
12311 int this_cpu = this_rq->cpu;
12317 if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
12333 atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
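Lines 12317-12333 gate the newidle kick: non-housekeeping CPUs bail out, and a housekeeping CPU atomically ORs the kick bit into its per-CPU nohz flags word. A C11-atomics sketch; the flag value here is a guess at the kernel's bit assignment and the function name is invented:

    #include <stdatomic.h>

    #define NOHZ_NEWILB_KICK 0x4 /* hypothetical bit value */

    static void model_newidle_kick(atomic_uint *nohz_flags,
                                   int is_housekeeping)
    {
        if (!is_housekeeping)
            return; /* line 12317: not a housekeeping CPU, no kick */

        /* line 12333: set the kick bit without losing concurrent ones */
        atomic_fetch_or(nohz_flags, NOHZ_NEWILB_KICK);
    }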
12348 * sched_balance_newidle is called by schedule() if this_cpu is about to become
12359 int this_cpu = this_rq->cpu;
12384 if (!cpu_active(this_cpu))
12411 t0 = sched_clock_cpu(this_cpu);
12412 sched_balance_update_blocked_averages(this_cpu);
12415 for_each_domain(this_cpu, sd) {
12425 pulled_task = sched_balance_rq(this_cpu, this_rq,
12429 t1 = sched_clock_cpu(this_cpu);
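Lines 12411-12429 bracket the blocked-average update and each domain's sched_balance_rq() call with per-CPU clock reads, so the cost of a newidle balance attempt can be charged back against the domain (the kernel tracks this as max_newidle_lb_cost). The same timing pattern in userspace, with clock_gettime() standing in for sched_clock_cpu() and timed_balance() an invented wrapper:

    #include <time.h>

    static long long now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Run one balance attempt and return its cost in nanoseconds. */
    static long long timed_balance(void (*balance_fn)(void))
    {
        long long t0 = now_ns();

        balance_fn(); /* stands in for the sched_balance_rq() call */
        return now_ns() - t0;
    }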