Searched refs: new_cpu (Results 1 - 17 of 17) sorted by relevance

/linux-master/drivers/irqchip/
irq-bcm6345-l1.c 194 unsigned int new_cpu; local
202 new_cpu = cpumask_any_and(&valid, cpu_online_mask);
203 if (new_cpu >= nr_cpu_ids)
206 dest = cpumask_of(new_cpu);
209 if (old_cpu != new_cpu) {
221 irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
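These bcm6345-l1 lines follow a common affinity-move pattern: intersect the requested mask with cpu_online_mask, bail out when the intersection is empty (cpumask_any_and() then returns nr_cpu_ids or more), and reprogram routing only if the chosen CPU actually changed. A minimal userspace sketch of that control flow, with plain unsigned long bitmasks standing in for struct cpumask and a hypothetical mask_any_and() helper:

    #include <stdio.h>

    #define NR_CPUS 8  /* stand-in for nr_cpu_ids */

    /* hypothetical model of cpumask_any_and(): lowest bit set in both
     * masks, or NR_CPUS if the intersection is empty */
    static unsigned int mask_any_and(unsigned long a, unsigned long b)
    {
        unsigned long both = a & b;
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            if (both & (1UL << cpu))
                return cpu;
        return NR_CPUS;
    }

    int main(void)
    {
        unsigned long valid = 0x0c;   /* CPUs 2,3 requested */
        unsigned long online = 0x0b;  /* CPUs 0,1,3 online */
        unsigned int old_cpu = 1;

        unsigned int new_cpu = mask_any_and(valid, online);
        if (new_cpu >= NR_CPUS)
            return 1;  /* no usable CPU; the driver errors out here */
        if (old_cpu != new_cpu)
            printf("move irq from CPU%u to CPU%u\n", old_cpu, new_cpu);
        return 0;
    }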
/linux-master/kernel/sched/
cpudeadline.c 175 int old_idx, new_cpu; local
190 new_cpu = cp->elements[cp->size - 1].cpu;
192 cp->elements[old_idx].cpu = new_cpu;
194 cp->elements[new_cpu].idx = old_idx;
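cpudeadline.c maintains a max-heap of per-CPU deadlines plus a reverse map from CPU number to heap position; the lines above are the delete-by-swap step, where the last heap element is copied into the vacated slot and the reverse map is patched before re-heapifying. A self-contained sketch of just that bookkeeping (types and names are illustrative, not the kernel's):

    #include <stdio.h>

    #define NR 4

    struct elem { int cpu; unsigned long long dl; };

    static struct elem heap[NR];  /* heap[0 .. size-1] */
    static int idx_of[NR];        /* cpu -> position in heap[] */
    static int size = NR;

    /* remove the element currently at heap position old_idx */
    static void heap_remove(int old_idx)
    {
        int new_cpu = heap[size - 1].cpu;  /* CPU held by the last slot */

        heap[old_idx] = heap[size - 1];    /* last element fills the hole */
        idx_of[new_cpu] = old_idx;         /* keep the reverse map in sync */
        size--;
        /* a real heap would now sift the moved element up or down */
    }

    int main(void)
    {
        for (int i = 0; i < NR; i++) {
            heap[i] = (struct elem){ .cpu = i, .dl = 100 - i };
            idx_of[i] = i;
        }
        heap_remove(1);
        printf("slot 1 now holds cpu %d\n", heap[1].cpu);
        return 0;
    }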
fair.c 3522 static void update_scan_period(struct task_struct *p, int new_cpu) argument
3525 int dst_nid = cpu_to_node(new_cpu);
3569 static inline void update_scan_period(struct task_struct *p, int new_cpu) argument
7172 int new_cpu = cpu; local
7200 new_cpu = find_idlest_group_cpu(group, p, cpu);
7201 if (new_cpu == cpu) {
7207 /* Now try balancing at a lower domain level of 'new_cpu': */
7208 cpu = new_cpu;
7219 return new_cpu;
8156 int new_cpu local
8225 migrate_task_rq_fair(struct task_struct *p, int new_cpu) argument
[all...]
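The hit at fair.c:7172 is the slow path of wake-up CPU selection: walk down the sched-domain hierarchy, ask each level for the idlest group's idlest CPU, and either descend (when the answer is the CPU already under consideration) or restart the walk from the newly chosen CPU, as the comment at 7207 says. A deliberately tiny runnable model of that descend-and-refine idea, using static loads and two fixed levels instead of real sched domains:

    #include <stdio.h>

    /* toy machine: 4 CPUs; level 1 spans all CPUs, level 0 spans the
     * pair containing the candidate (real code walks sd->child) */
    static const int load[4] = { 5, 3, 9, 1 };

    static int idlest_in_range(int first, int len)
    {
        int best = first;
        for (int c = first; c < first + len; c++)
            if (load[c] < load[best])
                best = c;
        return best;
    }

    int main(void)
    {
        int cpu = 0;  /* starting candidate, like prev_cpu */
        int new_cpu;

        new_cpu = idlest_in_range(0, 4);        /* level 1: whole machine */
        if (new_cpu != cpu)
            cpu = new_cpu;                      /* restart at the new CPU */
        new_cpu = idlest_in_range(cpu & ~1, 2); /* level 0: its pair */

        printf("selected CPU%d\n", new_cpu);
        return 0;
    }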
H A Dcore.c668 * [S] ->cpu = new_cpu [L] task_rq()
2521 struct task_struct *p, int new_cpu)
2526 set_task_cpu(p, new_cpu);
2529 rq = cpu_rq(new_cpu);
2532 WARN_ON_ONCE(task_cpu(p) != new_cpu);
3348 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) argument
3385 WARN_ON_ONCE(!cpu_online(new_cpu));
3390 trace_sched_migrate_task(p, new_cpu);
3392 if (task_cpu(p) != new_cpu) {
3394 p->sched_class->migrate_task_rq(p, new_cpu);
2520 move_queued_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int new_cpu) argument
[all...]
deadline.c 1948 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
sched.h 2285 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
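Read together, the core.c, deadline.c, and sched.h hits show the scheduling-class hook at work: set_task_cpu() fires the tracepoint and, when the task's class provides a migrate_task_rq() callback (declared in sched.h, implemented per class), invokes it with the destination CPU before the move. A self-contained sketch of that ops-table dispatch; the struct names mirror the kernel's, everything else is illustrative:

    #include <stdio.h>

    struct task_struct;  /* forward declaration for the ops table */

    struct sched_class {
        /* optional hook, called when a task moves to another CPU */
        void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
    };

    struct task_struct {
        int cpu;
        const struct sched_class *sched_class;
    };

    static void migrate_task_rq_sketch(struct task_struct *p, int new_cpu)
    {
        printf("class hook: leaving CPU%d for CPU%d\n", p->cpu, new_cpu);
    }

    static const struct sched_class demo_class = {
        .migrate_task_rq = migrate_task_rq_sketch,
    };

    static void set_task_cpu_sketch(struct task_struct *p, int new_cpu)
    {
        if (p->cpu != new_cpu) {
            if (p->sched_class->migrate_task_rq)
                p->sched_class->migrate_task_rq(p, new_cpu);
            p->cpu = new_cpu;
        }
    }

    int main(void)
    {
        struct task_struct t = { .cpu = 0, .sched_class = &demo_class };
        set_task_cpu_sketch(&t, 2);
        return 0;
    }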
/linux-master/arch/x86/hyperv/
hv_init.c 236 unsigned int new_cpu; local
275 new_cpu = cpumask_any_but(cpu_online_mask, cpu);
277 if (new_cpu < nr_cpu_ids)
278 re_ctrl.target_vp = hv_vp_index[new_cpu];
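The Hyper-V hit is a CPU-offline path: when the CPU receiving reenlightenment notifications goes away, cpumask_any_but() picks any other online CPU and the notification target is repointed at that CPU's VP index. The same pick-any-other-CPU step as a bitmask sketch (mask_any_but() is a hypothetical stand-in):

    #include <stdio.h>

    #define NR_CPUS 8

    /* hypothetical model of cpumask_any_but(): lowest online CPU other
     * than the excluded one, or NR_CPUS if there is none */
    static unsigned int mask_any_but(unsigned long online, unsigned int excluded)
    {
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            if ((online & (1UL << cpu)) && cpu != excluded)
                return cpu;
        return NR_CPUS;
    }

    int main(void)
    {
        unsigned long online = 0x07;  /* CPUs 0-2 online */
        unsigned int dying = 0;

        unsigned int new_cpu = mask_any_but(online, dying);
        if (new_cpu < NR_CPUS)
            printf("retarget notifications to CPU%u\n", new_cpu);
        return 0;
    }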
/linux-master/drivers/hv/
hyperv_vmbus.h 455 unsigned int new_cpu)
457 hv_set_allocated_cpu(new_cpu);
454 hv_update_allocated_cpus(unsigned int old_cpu, unsigned int new_cpu) argument
/linux-master/tools/perf/scripts/python/
sched-migration.py 191 def migrate(self, ts_list, new, old_cpu, new_cpu):
192 if old_cpu == new_cpu:
199 new_rq = self.prev.rqs[new_cpu]
201 self.rqs[new_cpu] = in_rq
208 self.event_cpus.append(new_cpu)
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_device.c 1023 int cpu, new_cpu; local
1025 cpu = new_cpu = smp_processor_id();
1027 new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
1028 if (cpu_to_node(new_cpu) == numa_node_id())
1030 } while (cpu != new_cpu);
1032 queue_work_on(new_cpu, wq, work);
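kfd_device.c queues interrupt work on a CPU in the caller's NUMA node: starting from the current CPU it advances through the online mask, wrapping around via % nr_cpu_ids, and stops either at a CPU whose node matches numa_node_id() or after one full lap (new_cpu comes back to cpu, and the work lands on the current CPU). A runnable model of that wrap-around walk, with a toy node table in place of the real topology:

    #include <stdio.h>

    #define NR_CPUS 8

    /* toy topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
    static int cpu_to_node(int cpu) { return cpu / 4; }

    int main(void)
    {
        int this_node = 1;
        int cpu, new_cpu;

        cpu = new_cpu = 2;  /* pretend smp_processor_id() == 2 */
        do {
            new_cpu = (new_cpu + 1) % NR_CPUS;  /* cpumask_next + wrap */
            if (cpu_to_node(new_cpu) == this_node)
                break;                          /* found a node-local CPU */
        } while (cpu != new_cpu);               /* full lap: give up */

        printf("queue work on CPU%d\n", new_cpu);
        return 0;
    }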
/linux-master/drivers/perf/
thunderx2_pmu.c 935 int new_cpu; local
950 new_cpu = cpumask_any_and(
954 tx2_pmu->cpu = new_cpu;
955 if (new_cpu >= nr_cpu_ids)
957 perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
/linux-master/arch/x86/events/intel/
uncore.c 1457 int new_cpu)
1463 die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
1471 box->cpu = new_cpu;
1477 if (new_cpu < 0)
1481 perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1482 box->cpu = new_cpu;
1487 int old_cpu, int new_cpu)
1490 uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1456 uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu) argument
1486 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) argument
/linux-master/arch/powerpc/perf/
imc-pmu.c 344 static void nest_change_cpu_context(int old_cpu, int new_cpu) argument
348 if (old_cpu < 0 || new_cpu < 0)
352 perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
/linux-master/arch/arm64/kvm/vgic/
vgic.c 693 struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu; local
697 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
/linux-master/kernel/
workqueue.c 2279 int new_cpu; local
2288 new_cpu = __this_cpu_read(wq_rr_cpu_last);
2289 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
2290 if (unlikely(new_cpu >= nr_cpu_ids)) {
2291 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
2292 if (unlikely(new_cpu >= nr_cpu_ids))
2295 __this_cpu_write(wq_rr_cpu_last, new_cpu);
2297 return new_cpu;
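The workqueue hit is the round-robin placement of unbound work items: wq_rr_cpu_last remembers the CPU handed out previously, the next candidate comes from cpumask_next_and() over (wq_unbound_cpumask & cpu_online_mask), and the walk wraps back to the first eligible CPU when it runs off the end. A compact model with a single unsigned long per mask (next_and() is a stand-in helper):

    #include <stdio.h>

    #define NR_CPUS 8

    static int rr_last = NR_CPUS;  /* models wq_rr_cpu_last */

    /* stand-in for cpumask_next_and(): first CPU above prev set in
     * both masks, or NR_CPUS when the walk runs off the end */
    static int next_and(int prev, unsigned long a, unsigned long b)
    {
        unsigned long both = a & b;
        for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
            if (both & (1UL << cpu))
                return cpu;
        return NR_CPUS;
    }

    static int pick_rr_cpu(unsigned long unbound, unsigned long online)
    {
        int new_cpu = next_and(rr_last, unbound, online);
        if (new_cpu >= NR_CPUS) {
            new_cpu = next_and(-1, unbound, online);  /* wrap to first */
            if (new_cpu >= NR_CPUS)
                return 0;  /* nothing eligible; fall back */
        }
        rr_last = new_cpu;
        return new_cpu;
    }

    int main(void)
    {
        unsigned long unbound = 0x55, online = 0xff;  /* CPUs 0,2,4,6 */
        for (int i = 0; i < 5; i++)
            printf("work item -> CPU%d\n", pick_rr_cpu(unbound, online));
        return 0;
    }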
/linux-master/drivers/scsi/lpfc/
lpfc_init.c 12450 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; local
12514 new_cpu = start_cpu;
12516 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12521 new_cpu = lpfc_next_present_cpu(new_cpu);
12533 start_cpu = lpfc_next_present_cpu(new_cpu);
12539 cpu, cpup->eq, new_cpu,
12560 new_cpu = start_cpu;
12562 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12566 new_cpu
[all...]
/linux-master/tools/perf/
builtin-sched.c 1555 bool new_cpu = false; local
1568 new_cpu = true;
1675 if (sched->map.comp && new_cpu)

Completed in 479 milliseconds