Searched refs:per_cpu (Results 26 - 50 of 402) sorted by relevance

/linux-master/net/rds/
page.c 90 rem = &per_cpu(rds_page_remainders, get_cpu());
124 rem = &per_cpu(rds_page_remainders, get_cpu());
160 rem = &per_cpu(rds_page_remainders, cpu);
/linux-master/arch/powerpc/platforms/ps3/
smp.c 40 virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
54 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
95 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
/linux-master/tools/testing/radix-tree/linux/
percpu.h 11 #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) [macro definition]
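
This userspace shim simply maps per_cpu() onto a per_cpu_ptr() dereference so the radix-tree tests can run outside the kernel. For orientation, a minimal kernel-style usage sketch of the accessor follows; the demo_hits counter and demo_total_hits() helper are hypothetical names, not taken from any of the files listed here:

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU counter used only to illustrate the accessor. */
static DEFINE_PER_CPU(unsigned long, demo_hits);

static unsigned long demo_total_hits(void)
{
	int cpu;
	unsigned long sum = 0;

	/* per_cpu(var, cpu) names the given CPU's copy of var as an lvalue. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_hits, cpu);
	return sum;
}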
/linux-master/arch/powerpc/platforms/cell/
cpufreq_spudemand.c 79 struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
95 affected_info = &per_cpu(spu_gov_info, i);
110 struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
118 info = &per_cpu(spu_gov_info, i);
/linux-master/arch/mips/kernel/
topology.c 16 struct cpu *c = &per_cpu(cpu_devices, i);
time.c 57 per_cpu(pcp_lpj_ref, cpu) =
59 per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
74 lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
75 per_cpu(pcp_lpj_ref_freq, cpu),
/linux-master/kernel/
watchdog_perf.c 196 struct perf_event *event = per_cpu(dead_event, cpu);
204 per_cpu(dead_event, cpu) = NULL;
221 struct perf_event *event = per_cpu(watchdog_ev, cpu);
243 struct perf_event *event = per_cpu(watchdog_ev, cpu);
softirq.c 905 per_cpu(tasklet_vec, cpu).tail =
906 &per_cpu(tasklet_vec, cpu).head;
907 per_cpu(tasklet_hi_vec, cpu).tail =
908 &per_cpu(tasklet_hi_vec, cpu).head;
945 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
946 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
947 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
948 per_cpu(tasklet_vec, cpu).head = NULL;
949 per_cpu(tasklet_ve
[all...]
/linux-master/arch/x86/include/asm/
smp.h 128 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
129 #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
146 return per_cpu(cpu_llc_shared_map, cpu);
151 return per_cpu(cpu_l2c_shared_map, cpu);
/linux-master/arch/x86/kvm/vmx/
posted_intr.c 92 raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
94 raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
155 raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
157 &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
158 raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
221 struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
222 raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);
236 INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));
237 raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
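
The posted_intr.c hits above follow an idiom that recurs throughout these results: a per-CPU list guarded by a per-CPU lock, both initialized across all possible CPUs at setup time. A minimal sketch of that pattern (the demo_* names are hypothetical, not KVM's):

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* One waiter list and one lock per CPU; each CPU touches only its own. */
static DEFINE_PER_CPU(struct list_head, demo_waiters);
static DEFINE_PER_CPU(raw_spinlock_t, demo_waiters_lock);

static void demo_init_percpu_lists(void)
{
	int cpu;

	/* Initialize every possible CPU's copy before any of them is used. */
	for_each_possible_cpu(cpu) {
		INIT_LIST_HEAD(&per_cpu(demo_waiters, cpu));
		raw_spin_lock_init(&per_cpu(demo_waiters_lock, cpu));
	}
}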
/linux-master/arch/x86/kernel/apic/
x2apic_cluster.c 58 struct cpumask *cmsk = per_cpu(cluster_masks, cpu);
110 struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i);
135 if (per_cpu(cluster_masks, cpu))
150 cmsk = per_cpu(cluster_masks, cpu_i);
156 per_cpu(cluster_masks, cpu) = cmsk;
170 per_cpu(cluster_masks, cpu) = cmsk;
188 if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
196 struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu);
200 free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
/linux-master/arch/x86/mm/
cpu_entry_area.c 25 return per_cpu(_cea_offset, cpu);
35 per_cpu(_cea_offset, i) = i;
56 per_cpu(_cea_offset, i) = cea;
117 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
147 per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
172 &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
233 cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
237 per_cpu(cpu_entry_area, cpu) = cea;
/linux-master/arch/x86/kernel/
itmt.c 162 return per_cpu(sched_core_priority, cpu);
181 per_cpu(sched_core_priority, cpu) = prio;
setup_percpu.c 171 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
172 per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
182 per_cpu(x86_cpu_to_apicid, cpu) =
184 per_cpu(x86_cpu_to_acpiid, cpu) =
188 per_cpu(x86_cpu_to_node_map, cpu) =
194 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
irq_32.c 115 if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
127 per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
128 per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
/linux-master/arch/riscv/mm/
context.c 52 if (per_cpu(reserved_context, cpu) == cntx) {
54 per_cpu(reserved_context, cpu) = newcntx;
74 cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
82 cntx = per_cpu(reserved_context, i);
85 per_cpu(reserved_context, i) = cntx;
169 old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
172 atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
188 atomic_long_set(&per_cpu(active_context, cpu), cntx);
/linux-master/arch/parisc/kernel/
irq.c 75 per_cpu(local_ack_eiem, cpu) &= ~mask;
78 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
90 per_cpu(local_ack_eiem, cpu) |= mask;
93 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
126 #define irq_stats(x) (&per_cpu(irq_stat, x))
318 return per_cpu(cpu_data, cpu).txn_addr;
330 (!per_cpu(cpu_data, next_cpu).txn_addr ||
403 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
406 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
422 last_usage = &per_cpu(irq_sta
[all...]
smp.c 123 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
128 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
199 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
200 spinlock_t *lock = &per_cpu(ipi_lock, cpu);
335 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
342 memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
421 spin_lock_init(&per_cpu(ipi_lock, cpu));
/linux-master/drivers/cpufreq/
sh-cpufreq.c 39 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
47 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
91 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
110 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
141 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
vexpress-spc-cpufreq.c 77 cpu_freq = per_cpu(cpu_last_req_freq, j);
79 if (cluster == per_cpu(physical_cluster, j) &&
89 u32 cur_cluster = per_cpu(physical_cluster, cpu);
102 return per_cpu(cpu_last_req_freq, cpu);
117 prev_rate = per_cpu(cpu_last_req_freq, cpu);
118 per_cpu(cpu_last_req_freq, cpu) = rate;
119 per_cpu(physical_cluster, cpu) = new_cluster;
143 per_cpu(cpu_last_req_freq, cpu) = prev_rate;
144 per_cpu(physical_cluster, cpu) = old_cluster;
184 new_cluster = actual_cluster = per_cpu(physical_cluste
[all...]
/linux-master/arch/arm/mm/
proc-v7-bugs.c 71 if (per_cpu(harden_branch_predictor_fn, cpu))
76 per_cpu(harden_branch_predictor_fn, cpu) =
82 per_cpu(harden_branch_predictor_fn, cpu) =
88 per_cpu(harden_branch_predictor_fn, cpu) =
95 per_cpu(harden_branch_predictor_fn, cpu) =
/linux-master/arch/arm64/kvm/
vmid.c 53 vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
57 vmid = per_cpu(reserved_vmids, cpu);
59 per_cpu(reserved_vmids, cpu) = vmid;
83 if (per_cpu(reserved_vmids, cpu) == vmid) {
85 per_cpu(reserved_vmids, cpu) = newvmid;
/linux-master/arch/arm/mach-omap2/
omap-mpuss-lowpower.c 120 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
131 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
185 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
230 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
317 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
375 pm_info = &per_cpu(omap4_pm_info, 0x0);
399 pm_info = &per_cpu(omap4_pm_info, 0x1);
/linux-master/drivers/xen/events/
events_2l.c 52 clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
58 clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
59 set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
152 per_cpu(cpu_evtchn_mask, cpu)[idx] &
268 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
280 v = per_cpu(xen_vcpu, i);
289 v = per_cpu(xen_vcpu, cpu);
353 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
359 memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
/linux-master/arch/loongarch/kernel/
irq.c 111 per_cpu(irq_stack, i) = (unsigned long)page_address(page);
113 per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);

Completed in 270 milliseconds