Searched refs:prev_cpu (Results 1 - 10 of 10) sorted by relevance

/linux-master/arch/x86/include/asm/trace/
irq_vectors.h
156 unsigned int prev_cpu),
158 TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
165 __field( unsigned int, prev_cpu )
173 __entry->prev_cpu = prev_cpu;
177 TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
179 __entry->prev_vector, __entry->prev_cpu)
186 unsigned int prev_cpu), \
187 TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL); \
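Taken together, these lines trace out an event that records both the new and the previous CPU/vector for an interrupt. Below is a minimal sketch of that event shape, written as a standalone TRACE_EVENT() rather than the DECLARE_EVENT_CLASS()/DEFINE_... wrappers the snippets hint at; the event name irq_vector_update is a hypothetical placeholder, not the header's real class name.

TRACE_EVENT(irq_vector_update,	/* hypothetical name, for illustration only */

	TP_PROTO(unsigned int irq, unsigned int vector,
		 unsigned int cpu, unsigned int prev_vector,
		 unsigned int prev_cpu),

	TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),

	TP_STRUCT__entry(
		__field( unsigned int,	irq		)
		__field( unsigned int,	vector		)
		__field( unsigned int,	cpu		)
		__field( unsigned int,	prev_vector	)
		__field( unsigned int,	prev_cpu	)
	),

	TP_fast_assign(
		__entry->irq		= irq;
		__entry->vector		= vector;
		__entry->cpu		= cpu;
		__entry->prev_vector	= prev_vector;
		__entry->prev_cpu	= prev_cpu;
	),

	TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
		  __entry->irq, __entry->vector, __entry->cpu,
		  __entry->prev_vector, __entry->prev_cpu)
);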
/linux-master/arch/powerpc/lib/
qspinlock.c
260 static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu) argument
262 struct qnodes *qnodesp = per_cpu_ptr(&qnodes, prev_cpu);
376 static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt) argument
391 if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
418 yield_count = yield_count_of(prev_cpu);
430 yield_to_preempted(prev_cpu, yield_count);
575 int prev_cpu = decode_tail_cpu(old); local
576 struct qnode *prev = get_tail_qnode(lock, prev_cpu);
586 if (yield_to_prev(lock, node, prev_cpu, paravirt))
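The qspinlock.c hits show the paravirt "yield to the previous waiter" pattern: the tail encodes the previous waiter's CPU, its qnode is looked up per-CPU, and if that vCPU looks preempted the current waiter donates its time slice instead of spinning. A minimal sketch of that yield step, assuming only the helpers visible above (vcpu_is_preempted(), yield_count_of(), yield_to_preempted()); the function name is hypothetical and the sleepy/lock re-check logic of the real yield_to_prev() is omitted.

/* Hedged sketch, not the real yield_to_prev(): if the waiter queued ahead
 * of us has had its vCPU preempted by the hypervisor, hand it our time
 * slice so it can make progress. */
static bool maybe_yield_to_prev(int prev_cpu)
{
	u32 yield_count;

	if (!vcpu_is_preempted(prev_cpu))
		return false;				/* previous waiter is running */

	/* Snapshot the yield count so the hypervisor can tell whether the
	 * target vCPU has already run again since we checked. */
	yield_count = yield_count_of(prev_cpu);
	yield_to_preempted(prev_cpu, yield_count);
	return true;
}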
/linux-master/arch/sparc/kernel/
cpumap.c
193 int n, id, cpu, prev_cpu, last_cpu, level; local
204 prev_cpu = cpu = cpumask_first(cpu_online_mask);
268 (cpu == last_cpu) ? cpu : prev_cpu;
290 prev_cpu = cpu;
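The cpumap.c hit is a simple iteration pattern: walk cpu_online_mask while remembering the CPU visited on the previous step. A minimal sketch of that loop shape, with the tree-building work the real code does replaced by a placeholder comment:

int cpu, prev_cpu;

prev_cpu = cpu = cpumask_first(cpu_online_mask);
while (cpu < nr_cpu_ids) {
	/* ... per-CPU work that may compare cpu against prev_cpu ... */
	prev_cpu = cpu;
	cpu = cpumask_next(cpu, cpu_online_mask);
}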
/linux-master/arch/x86/kernel/apic/
vector.c
31 unsigned int prev_cpu; member in struct:apic_chip_data
174 apicd->prev_cpu = apicd->cpu;
357 apicd->prev_cpu);
368 per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
369 irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
648 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
938 unsigned int cpu = apicd->prev_cpu;
1011 unsigned int cpu = apicd->prev_cpu;
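In vector.c, prev_cpu lives in struct apic_chip_data and records where an interrupt was handled before it was moved, so the old allocation can be torn down afterwards. A minimal sketch of that cleanup step, assuming a matching prev_vector field and the per-CPU vector_irq table and irq_matrix allocator visible above; the helper name is hypothetical, not the kernel's actual cleanup routine.

/* Hedged sketch: release the vector an interrupt used on its previous CPU
 * once it is fully handled on the new one. */
static void release_previous_vector(struct apic_chip_data *apicd, bool managed)
{
	unsigned int vector = apicd->prev_vector;	/* vector before the move (assumed field) */
	unsigned int cpu = apicd->prev_cpu;		/* CPU it was handled on before the move */

	/* The old per-CPU vector slot no longer targets this interrupt. */
	per_cpu(vector_irq, cpu)[vector] = VECTOR_SHUTDOWN;

	/* Return the vector to the per-CPU allocation matrix. */
	irq_matrix_free(vector_matrix, cpu, vector, managed);

	apicd->prev_vector = 0;
}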
/linux-master/kernel/sched/
fair.c
1007 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
7017 wake_affine_idle(int this_cpu, int prev_cpu, int sync) argument
7025 * If the prev_cpu is idle and cache affine then avoid a migration.
7027 * is more important than cache hot data on the prev_cpu and from
7031 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
7032 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
7037 if (available_idle_cpu(prev_cpu))
7038 return prev_cpu;
7044 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) argument
7086 wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) argument
7169 find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag) argument
7790 eenv_task_busy_time(struct energy_env *eenv, struct task_struct *p, int prev_cpu) argument
7952 find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) argument
8151 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) argument
[all...]
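The fair.c hits contain most of the wake_affine_idle() decision: the waker's CPU is preferred only when it is idle and shares a cache with prev_cpu, and an otherwise-idle prev_cpu wins so cache-hot data is not abandoned. A minimal reconstruction of that logic from the lines above; the sync-wakeup shortcut is omitted and the "no decision" return value is an assumption.

/* Hedged reconstruction of the idle-based wake-affinity choice shown above. */
static int wake_affine_idle_sketch(int this_cpu, int prev_cpu)
{
	/* Prefer the waker's CPU only if it is idle and cache-affine with
	 * prev_cpu; if prev_cpu is idle as well, avoiding the migration wins. */
	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;

	if (available_idle_cpu(prev_cpu))
		return prev_cpu;

	return nr_cpumask_bits;		/* assumed: caller falls back to a wider search */
}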
/linux-master/arch/powerpc/kvm/
book3s_hv.c
3086 vcpu->arch.prev_cpu = -1;
3295 int prev_cpu; local
3301 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
3303 prev_cpu = vcpu->arch.prev_cpu;
3316 if (prev_cpu != pcpu) {
3317 if (prev_cpu >= 0) {
3318 if (cpu_first_tlb_thread_sibling(prev_cpu) !=
3320 radix_flush_cpu(kvm, prev_cpu, vcp
[all...]
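The book3s_hv.c hits track, per vCPU (and per nested guest), the last physical CPU the vCPU ran on, so stale radix TLB entries can be flushed when it next runs on a different core. A minimal sketch of that check, assuming the cpu_first_tlb_thread_sibling() and radix_flush_cpu() helpers seen above and that the truncated call ends with the vcpu argument; the nested-guest bookkeeping is left out.

/* Hedged sketch: flush the TLB the vCPU may have dirtied on its previous
 * core before letting it run on a new one, then remember the new CPU. */
if (prev_cpu != pcpu) {
	if (prev_cpu >= 0 &&
	    cpu_first_tlb_thread_sibling(prev_cpu) !=
	    cpu_first_tlb_thread_sibling(pcpu))
		radix_flush_cpu(kvm, prev_cpu, vcpu);

	vcpu->arch.prev_cpu = pcpu;
}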
book3s_hv_nested.c
736 memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
/linux-master/arch/powerpc/include/asm/
kvm_book3s_64.h
34 short prev_cpu[NR_CPUS]; member in struct:kvm_nested_guest
kvm_host.h
787 int prev_cpu; member in struct:kvmppc_slb::kvm_vcpu_arch
/linux-master/drivers/irqchip/
irq-gic-v3-its.c
1685 int cpu, prev_cpu; local
1691 prev_cpu = its_dev->event_map.col_map[id];
1692 its_dec_lpi_count(d, prev_cpu);
1703 if (cpu != prev_cpu) {
1715 its_inc_lpi_count(d, prev_cpu);
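The irq-gic-v3-its.c hits show the affinity-change bookkeeping: the interrupt's current target comes out of the device's col_map, its per-CPU LPI count is dropped up front, and the count is restored on the old CPU if the move fails or added on the new CPU once it succeeds. A minimal sketch of that flow, assuming the its_dec_lpi_count()/its_inc_lpi_count() helpers seen above; retarget_lpi() and the function name are hypothetical stand-ins for the real collection reprogramming.

/* Hedged sketch of the set-affinity bookkeeping around prev_cpu. */
static int its_retarget_sketch(struct irq_data *d, struct its_device *its_dev,
			       u32 id, int new_cpu)
{
	int prev_cpu = its_dev->event_map.col_map[id];	/* current target CPU */

	/* Drop the old CPU's LPI count before attempting the move. */
	its_dec_lpi_count(d, prev_cpu);

	if (new_cpu != prev_cpu) {
		if (retarget_lpi(its_dev, id, new_cpu)) {	/* hypothetical helper */
			/* Move failed: give the count back to the old CPU. */
			its_inc_lpi_count(d, prev_cpu);
			return -EINVAL;
		}
	}

	its_inc_lpi_count(d, new_cpu);
	return 0;
}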

Completed in 302 milliseconds