Lines matching defs:this_cpu in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/kernel/

857 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
863 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1298 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
1314 local_group = cpu_isset(this_cpu, group->cpumask);
1351 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1364 if (load < min_load || (load == min_load && i == this_cpu)) {
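
Lines 1298-1364 are the wakeup/fork balancing helpers: find_idlest_group() picks the least-loaded sched_group in a domain, and find_idlest_cpu() then scans that group for the least-loaded CPU, with this_cpu winning ties so an equally idle local CPU avoids a migration (line 1364). A minimal userspace sketch of that scan follows; loads[] and ncpus are assumptions standing in for the per-CPU runqueue weights the kernel reads:

    #include <limits.h>

    /* Sketch: pick the CPU index with the lowest load; on a tie, prefer
     * this_cpu.  loads[] is a stand-in for the per-CPU runqueue weights. */
    static int find_idlest_cpu_sketch(const unsigned long *loads, int ncpus,
                                      int this_cpu)
    {
        unsigned long min_load = ULONG_MAX;
        int idlest = -1;

        for (int i = 0; i < ncpus; i++) {
            /* "<" keeps the first minimum; the "== ... && i == this_cpu"
             * clause lets the local CPU win a tie, avoiding a migration. */
            if (loads[i] < min_load ||
                (loads[i] == min_load && i == this_cpu)) {
                min_load = loads[i];
                idlest = i;
            }
        }
        return idlest;
    }
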
1509 int cpu, orig_cpu, this_cpu, success = 0;
1529 this_cpu = smp_processor_id();
1538 if (cpu == this_cpu) {
1543 for_each_domain(this_cpu, sd) {
1551 if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1564 this_load = target_load(this_cpu, idx);
1566 new_cpu = this_cpu; /* Wake to this CPU if we can */
1579 tl_per_task = cpu_avg_load_per_task(this_cpu);
1616 new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
1630 this_cpu = smp_processor_id();
1641 if (cpu == this_cpu)
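
The 1509-1641 hits are inside try_to_wake_up(): the waking CPU records itself as this_cpu (line 1529) and, walking its domains, decides whether to pull the woken task over or leave it on its previous CPU (lines 1564-1616). A heavily reduced sketch of that choice; the two load inputs and imbalance_pct stand in for target_load()/source_load() and the sched_domain tunable, so treat the exact condition as an assumption:

    /* Sketch: wake on this_cpu only while its load stays within the
     * domain's imbalance percentage of the previous CPU's load. */
    static int choose_wake_cpu(unsigned long this_load,
                               unsigned long prev_load,
                               unsigned int imbalance_pct,
                               int this_cpu, int prev_cpu)
    {
        if (100 * this_load <= imbalance_pct * prev_load)
            return this_cpu;   /* affine wakeup: cheaper cache-wise */
        return prev_cpu;       /* could not wake to this_cpu */
    }
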
2187 int new_cpu, this_cpu = get_cpu();
2188 new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
2190 if (new_cpu != this_cpu)
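
Lines 2187-2190 are the exec-time balance: exec() is the cheapest moment to migrate, since the task's cache and memory image are about to be replaced, so the kernel asks sched_balance_self(this_cpu, SD_BALANCE_EXEC) for a better CPU and migrates only on a difference. A sketch of the pattern, with the chooser and the migration passed in as hypothetical callbacks:

    /* Sketch: rebalance at exec(); pick_cpu stands in for
     * sched_balance_self() and migrate_to for the migration machinery. */
    static int exec_balance_sketch(int this_cpu,
                                   int (*pick_cpu)(int cpu),
                                   void (*migrate_to)(int cpu))
    {
        int new_cpu = pick_cpu(this_cpu);

        if (new_cpu != this_cpu)
            migrate_to(new_cpu);
        return new_cpu;
    }
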
2199 struct rq *this_rq, int this_cpu)
2202 set_task_cpu(p, this_cpu);
2212 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2215 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2225 if (!cpu_isset(this_cpu, p->cpus_allowed)) {
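
can_migrate_task() (2212-2225) is the gate every pull goes through; the hit at 2225 is its first check, refusing any task whose affinity mask excludes this_cpu. A reduced sketch of the three classic checks, with a plain bitmask standing in for cpumask_t and the cache-hot test collapsed to a boolean:

    #include <stdbool.h>

    /* Sketch of can_migrate_task(): affinity, running, cache-hot. */
    static bool can_migrate_sketch(unsigned long cpus_allowed,
                                   int this_cpu, bool task_running,
                                   bool cache_hot, bool idle_pull)
    {
        if (!(cpus_allowed & (1UL << this_cpu)))
            return false;   /* pinned away from this_cpu */
        if (task_running)
            return false;   /* cannot move the task currently on the CPU */
        if (cache_hot && !idle_pull)
            return false;   /* leave cache-hot tasks unless we are idle */
        return true;
    }
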
2261 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2290 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2295 pull_task(busiest, p, this_rq, this_cpu);
2329 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2340 class->load_balance(this_rq, this_cpu, busiest,
2350 iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2358 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
2359 pull_task(busiest, p, this_rq, this_cpu);
2382 static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2388 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
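
The 2261-2388 block is the pull machinery itself: balance_tasks() walks the busiest runqueue through the scheduling-class iterators, and for each candidate that passes can_migrate_task() calls pull_task() to dequeue it, set_task_cpu(p, this_cpu), and enqueue it locally (lines 2295/2359); move_tasks() repeats per class until enough load has moved. A minimal accounting-only sketch of that loop, with an array and a weight field as assumptions in place of the runqueue lists:

    /* Sketch of the move_tasks()/balance_tasks() pull loop. */
    struct task_sketch {
        unsigned long weight;   /* load this task contributes */
        int pinned_to;          /* -1 = runs anywhere, else its sole CPU */
    };

    static unsigned long pull_until_balanced(const struct task_sketch *t,
                                             int n, int this_cpu,
                                             unsigned long max_load_move)
    {
        unsigned long moved = 0;

        for (int i = 0; i < n && moved < max_load_move; i++) {
            if (t[i].pinned_to >= 0 && t[i].pinned_to != this_cpu)
                continue;           /* the can_migrate_task() affinity gate */
            moved += t[i].weight;   /* pull_task(), modeled as accounting */
        }
        return moved;
    }
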
2400 find_busiest_group(struct sched_domain *sd, int this_cpu,
2435 local_group = cpu_isset(this_cpu, group->cpumask);
2484 balance_cpu != this_cpu && balance) {
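
find_busiest_group() (2400-2484) walks every group in the domain; the group whose cpumask contains this_cpu is "local" (line 2435) and supplies one side of the imbalance comparison, while the others compete for busiest. A reduced sketch of that walk, with bitmasks and precomputed per-group loads as assumptions in place of group->cpumask and the summed runqueue weights:

    /* Sketch: split groups into local vs. busiest candidate. */
    struct group_sketch {
        unsigned long cpumask;   /* bit per CPU in the group */
        unsigned long avg_load;  /* precomputed group load */
    };

    static int find_busiest_group_sketch(const struct group_sketch *g,
                                         int ngroups, int this_cpu,
                                         unsigned long *this_load)
    {
        unsigned long max_load = 0;
        int busiest = -1;

        for (int i = 0; i < ngroups; i++) {
            if (g[i].cpumask & (1UL << this_cpu))
                *this_load = g[i].avg_load;   /* our side of the compare */
            else if (g[i].avg_load > max_load) {
                max_load = g[i].avg_load;
                busiest = i;
            }
        }
        return busiest;   /* -1 when no other group carries load */
    }
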
2737 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2740 static int load_balance(int this_cpu, struct rq *this_rq,
2764 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2795 ld_moved = move_tasks(this_rq, this_cpu, busiest,
2803 if (ld_moved && this_cpu != smp_processor_id())
2804 resched_cpu(this_cpu);
2824 * task on busiest cpu can't be moved to this_cpu
2826 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
2834 busiest->push_cpu = this_cpu;
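
load_balance() (2737-2834) is the periodic balancer: it finds the busiest group and runqueue, pulls with move_tasks() (2795), and reschedules this_cpu if it moved work on behalf of another CPU (2803-2804). When nothing could be pulled, it falls back to active balancing: unless the busiest CPU's current task is pinned away from this_cpu (2826), it records this_cpu as push_cpu (2834) and wakes the remote migration thread to push a task over. A sketch of that fallback, with a small struct standing in for the relevant rq fields:

    /* Sketch of the active-balance request at the end of load_balance(). */
    struct rq_sketch {
        unsigned long curr_allowed;   /* affinity mask of rq->curr */
        int push_cpu;                 /* where the migration thread pushes */
        int active_balance;           /* request flag for that thread */
    };

    static int request_active_balance(struct rq_sketch *busiest, int this_cpu)
    {
        if (!(busiest->curr_allowed & (1UL << this_cpu)))
            return 0;                    /* all pinned: give up on this rq */
        busiest->push_cpu = this_cpu;
        busiest->active_balance = 1;     /* migration thread does the push */
        return 1;
    }
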
2887 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2894 load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
2916 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
2940 ld_moved = move_tasks(this_rq, this_cpu, busiest,
2973 * idle_balance is called by schedule() if this_cpu is about to become
2976 static void idle_balance(int this_cpu, struct rq *this_rq)
2982 for_each_domain(this_cpu, sd) {
2990 pulled_task = load_balance_newidle(this_cpu,
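
load_balance_newidle() (2887-2940) is the variant used when this_cpu is about to go idle, and idle_balance() (2973-2990) drives it: walk this_cpu's domains from the innermost outward and stop at the first successful pull. A sketch of that walk, with the pull passed in as a callback standing in for load_balance_newidle():

    /* Sketch of idle_balance(): stop at the first domain that yields work. */
    typedef int (*pull_fn)(int this_cpu, int domain_level);

    static void idle_balance_sketch(int this_cpu, int ndomains, pull_fn pull)
    {
        for (int d = 0; d < ndomains; d++)
            if (pull(this_cpu, d))
                break;   /* got a task; wider domains are unnecessary */
    }
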
3216 int this_cpu = smp_processor_id();
3217 struct rq *this_rq = cpu_rq(this_cpu);
3221 rebalance_domains(this_cpu, idle);
3230 atomic_read(&nohz.load_balancer) == this_cpu) {
3235 cpu_clear(this_cpu, cpus);
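
Lines 3216-3235 are the nohz side of run_rebalance_domains(): after balancing its own domains (3221), this_cpu checks whether it is the designated idle load balancer (3230) and, if so, rebalances on behalf of every other tick-stopped CPU, first clearing itself from the set (3235) since its own domains are already done. A sketch of that hand-off, with a bitmask standing in for nohz.cpu_mask and the per-CPU rebalance as a callback:

    /* Sketch of the nohz idle-balancer fan-out. */
    static void nohz_balance_sketch(unsigned long nohz_mask, int this_cpu,
                                    int balancer_cpu, int ncpus,
                                    void (*rebalance)(int cpu))
    {
        rebalance(this_cpu);              /* our own domains first */

        if (balancer_cpu != this_cpu)
            return;                       /* not the designated balancer */

        nohz_mask &= ~(1UL << this_cpu);  /* already balanced above */
        for (int cpu = 0; cpu < ncpus; cpu++)
            if (nohz_mask & (1UL << cpu))
                rebalance(cpu);           /* on behalf of a sleeping CPU */
    }
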
5315 int this_cpu = smp_processor_id();
5316 struct rq *rq = cpu_rq(this_cpu);
5321 BUG_ON(cpu_online(this_cpu));