/linux-master/tools/testing/selftests/powerpc/
  utils.c
    419  int ncpus, cpu = -1;
    439  for (cpu = 8; cpu < ncpus; cpu += 8)
    440  if (CPU_ISSET_S(cpu, size, mask))
    444  for (cpu = ncpus - 1; cpu >= 0; cpu--)
    445  if (CPU_ISSET_S(cpu, size, mask))
    452  return cpu;
    455  bind_to_cpu(int cpu)
    523  perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
    ...
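
The utils.c hits above are the selftest helpers that pick an online CPU out of the task's affinity mask, bind to it, and open a perf event there. A minimal userspace sketch of the same pattern, assuming only standard glibc interfaces and the raw perf_event_open(2) syscall; the helper names and the instruction-count event are illustrative, not the selftest's own code:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Hypothetical helper: pick any CPU present in our affinity mask. */
static int pick_bound_cpu(void)
{
	int ncpus = sysconf(_SC_NPROCESSORS_CONF);
	size_t size = CPU_ALLOC_SIZE(ncpus);
	cpu_set_t *mask = CPU_ALLOC(ncpus);
	int cpu = -1;

	if (!mask || sched_getaffinity(0, size, mask)) {
		CPU_FREE(mask);
		return -1;
	}
	for (int i = 0; i < ncpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			cpu = i;
			break;
		}
	}
	CPU_FREE(mask);
	return cpu;
}

/* Bind the calling thread to a single CPU. */
static int bind_to_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return sched_setaffinity(0, sizeof(mask), &mask);
}

/* perf_event_open has no glibc wrapper, so go through syscall(2). */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			    int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int cpu = pick_bound_cpu();
	long fd;

	if (cpu < 0 || bind_to_cpu(cpu))
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;	/* let it work without privileges */

	/* Count instructions for this task, on this CPU only. */
	fd = perf_event_open(&attr, 0, cpu, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	printf("counting instructions on cpu %d\n", cpu);
	close(fd);
	return 0;
}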
/linux-master/arch/arm/mach-mvebu/
  pmsu.c
     45  #define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
     50  #define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
     54  #define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
     63  #define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
     67  #define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * ...
    538  u32 cpu = smp_processor_id();
    569  mvebu_pmsu_dfs_request(int cpu)
    ...
/linux-master/arch/openrisc/mm/
  tlb.c
    140  unsigned int cpu;
    145  cpu = smp_processor_id();
    147  cpumask_clear_cpu(cpu, mm_cpumask(prev));
    148  cpumask_set_cpu(cpu, mm_cpumask(next));
    156  current_pgd[cpu] = next->pgd;
/linux-master/sound/soc/meson/
  gx-card.c
     83  struct snd_soc_dai_link_component *cpu;
     86  cpu = devm_kzalloc(card->dev, sizeof(*cpu), GFP_KERNEL);
     87  if (!cpu)
     90  dai_link->cpus = cpu;
    111  /* Check if the cpu is the i2s encoder and parse i2s data */
/linux-master/include/xen/
  events.h
     29  int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
     30  int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
     35  unsigned int cpu,
     76  void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
    103  int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
/linux-master/arch/mips/bcm63xx/
  irq.c
     28  static void (*dispatch_internal)(int cpu);
     53  static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
     56  bool enable = cpu_online(cpu);
     60  enable &= cpumask_test_cpu(cpu, m);
     62  enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
     75  static void __dispatch_internal_##width(int cpu) \
     81  unsigned int *next = &i[cpu]; \
     89  val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
     90  val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
    119  int cpu; \
    ...
/linux-master/arch/x86/include/asm/
  msr.h
    334  int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
    335  int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
    336  int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
    337  int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
    340  int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
    341  int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
    342  int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
    343  int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
    344  int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
    345  int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
    347  rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
    352  wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
    357  rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
    362  wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
    377  rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
    382  wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
    386  rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
    390  wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
    394  rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
    398  wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
    ...
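
These *_on_cpu() helpers perform the MSR access on the named CPU (via an IPI) and return 0 on success; the _safe variants report a #GP fault instead of oopsing. A minimal kernel-module-style sketch of how the declarations above are typically used, assuming an x86 kernel build tree; the choice of MSR_IA32_APERF is only an illustration:

#include <linux/module.h>
#include <linux/cpu.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

static int __init msr_demo_init(void)
{
	unsigned int cpu;
	u64 aperf;
	int err;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		/* Read MSR_IA32_APERF on 'cpu'; the access runs there via IPI. */
		err = rdmsrl_safe_on_cpu(cpu, MSR_IA32_APERF, &aperf);
		if (err)
			pr_warn("cpu%u: APERF not readable (%d)\n", cpu, err);
		else
			pr_info("cpu%u: APERF=%llu\n", cpu, aperf);
	}
	cpus_read_unlock();
	return 0;
}

static void __exit msr_demo_exit(void) { }

module_init(msr_demo_init);
module_exit(msr_demo_exit);
MODULE_LICENSE("GPL");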
/linux-master/arch/x86/kernel/
  smp.c
     22  #include <linux/cpu.h>
     31  #include <asm/cpu.h>
    123  /* We are registered on stopping cpu too, avoid spurious NMI */
    215  unsigned int cpu;
    219  for_each_cpu(cpu, &cpus_stop_mask)
    220  __apic_send_IPI(cpu, NMI_VECTOR);
    245  * Reschedule call back. KVM uses this interrupt to force a cpu out of
  kvm.c
     17  #include <linux/cpu.h>
     33  #include <asm/cpu.h>
     88  int cpu;	/* member of struct kvm_task_sleep_node */
    128  n->cpu = smp_processor_id();
    184  if (n->cpu == smp_processor_id())
    230  dummy->cpu = smp_processor_id();
    314  pv_ops.cpu.io_delay = kvm_io_delay;
    323  int cpu = smp_processor_id();
    324  struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
    330  pr_debug("stealtime: cpu ...
    404  kvm_steal_clock(int cpu)
    436  int cpu;
    462  kvm_cpu_online(unsigned int cpu)
    505  int cpu, min = 0, max = 0;
    636  int cpu;
    653  int cpu;
    682  int cpu;
    710  kvm_cpu_down_prepare(unsigned int cpu)
    785  __kvm_vcpu_is_preempted(long cpu)
   1031  kvm_kick_cpu(int cpu)
   1132  arch_haltpoll_enable(unsigned int cpu)
   1145  arch_haltpoll_disable(unsigned int cpu)
    ...
/linux-master/arch/riscv/kernel/
  paravirt.c
     28  static u64 native_steal_clock(int cpu)
     75  static int pv_time_cpu_online(unsigned int cpu)
     85  static int pv_time_cpu_down_prepare(unsigned int cpu)
     91  static u64 pv_time_steal_clock(int cpu)
     93  struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
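
pv_time_cpu_online()/pv_time_cpu_down_prepare() (like kvm_cpu_online()/kvm_cpu_down_prepare() in the x86 entry above) are per-CPU bring-up and tear-down callbacks registered with the CPU hotplug state machine. A minimal sketch of that registration pattern using a dynamic hotplug state, assuming a kernel build tree; the callback names and bodies are placeholders:

#include <linux/cpuhotplug.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical setup hook, called on the CPU that is coming online. */
static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu%u online, enabling per-cpu feature\n", cpu);
	return 0;		/* non-zero would abort the bring-up */
}

/* Hypothetical teardown hook, called before the CPU goes offline. */
static int demo_cpu_down_prepare(unsigned int cpu)
{
	pr_info("demo: cpu%u going down, disabling per-cpu feature\n", cpu);
	return 0;
}

static int hp_state;

static int __init demo_init(void)
{
	/* Dynamic state: the online callback also runs on all CPUs already up. */
	hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				     demo_cpu_online, demo_cpu_down_prepare);
	return hp_state < 0 ? hp_state : 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(hp_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");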
/linux-master/drivers/hwtracing/coresight/
  coresight-etm3x-core.c
     22  #include <linux/cpu.h>
    432  dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
    433  drvdata->cpu, rc);
    455  return drvdata->cpu;
    463  * This will allocate a trace ID to the cpu,
    468  trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
    474  dev_name(&drvdata->csdev->dev), drvdata->cpu);
    480  coresight_trace_id_put_cpu_id(drvdata->cpu);
    489  if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
    496  * perf allocates cpu id ...
    709  etm_online_cpu(unsigned int cpu)
    719  etm_starting_cpu(unsigned int cpu)
    736  etm_dying_cpu(unsigned int cpu)
    934  int cpu = *(int *)info;
    ...
/linux-master/arch/arm/mach-rockchip/
  platsmp.c
     17  #include <linux/cpu.h>
     50  static struct reset_control *rockchip_get_core_reset(int cpu)
     52  struct device *dev = get_cpu_device(cpu);
     55  /* The cpu device is only available after the initial core bringup */
     59  np = of_get_cpu_node(cpu, NULL);
     77  * We need to soft reset the cpu when we turn off the cpu power domain,
    116  static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
    121  pr_err("%s: sram or pmu missing for cpu boot\n", __func__);
    125  if (cpu > ...
    334  rockchip_cpu_kill(unsigned int cpu)
    347  rockchip_cpu_die(unsigned int cpu)
    ...
/linux-master/tools/perf/tests/
  perf-record.c
     19  int i, cpu = -1, nrcpus = 1024;
     34  if (cpu == -1)
     35  cpu = i;
     41  return cpu;
     69  u32 cpu;
     89  pr_debug("Not enough memory to create thread/cpu maps\n");
    121  cpu = err;
    124  * So that we can check perf_sample.cpu on all the samples.
    194  pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
    206  if (sample.cpu !...
    ...
/linux-master/arch/arm/mach-omap2/
  omap-mpuss-lowpower.c
    216  * @cpu : CPU ID
    227  __cpuidle int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
    230  struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
    271  cpu_clear_prev_logic_pwrst(cpu);
    278  set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
    279  omap_pm_ops.scu_prepare(cpu, power_state);
    280  l2x0_pwrst_prepare(cpu, save_state);
    290  if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
    312  * @cpu : CPU ID
    315  int omap4_hotplug_cpu(unsigned int cpu, unsigned ...
    ...
/linux-master/lib/
  group_cpus.c
      8  #include <linux/cpu.h>
     18  int cpu, sibl;
     21  cpu = cpumask_first(nmsk);
     24  if (cpu >= nr_cpu_ids)
     27  cpumask_clear_cpu(cpu, nmsk);
     28  cpumask_set_cpu(cpu, irqmsk);
     31  /* If the cpu has siblings, use them first */
     32  siblmsk = topology_sibling_cpumask(cpu);
     79  int cpu;
     81  for_each_possible_cpu(cpu)
    ...
/linux-master/tools/perf/bench/
  futex-requeue.c
    124  static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
    128  int nrcpus = perf_cpu_map__nr(cpu);
    143  CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
    171  struct perf_cpu_map *cpu;
    177  cpu = perf_cpu_map__new_online_cpus();
    178  if (!cpu)
    192  params.nthreads = perf_cpu_map__nr(cpu);
    223  block_threads(worker, cpu);
    ...
  futex-lock-pi.c
    121  static void create_threads(struct worker *w, struct perf_cpu_map *cpu)
    125  int nrcpus = perf_cpu_map__nr(cpu);
    148  CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
    169  struct perf_cpu_map *cpu;
    175  cpu = perf_cpu_map__new_online_cpus();
    176  if (!cpu)
    190  params.nthreads = perf_cpu_map__nr(cpu);
    210  create_threads(worker, cpu);
    ...
  futex-wake.c
     98  static void block_threads(pthread_t *w, struct perf_cpu_map *cpu)
    103  int nrcpus = perf_cpu_map__nr(cpu);
    116  CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
    144  struct perf_cpu_map *cpu;
    152  cpu = perf_cpu_map__new_online_cpus();
    153  if (!cpu)
    167  params.nthreads = perf_cpu_map__nr(cpu);
    192  block_threads(worker, cpu);
    ...
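
All three futex benchmarks create one worker per online CPU and pin the workers round-robin across the online set before they block. A minimal plain-pthreads sketch of that pinning pattern, without the perf_cpu_map helpers; it assumes online CPUs are numbered 0..N-1 (the real code uses perf_cpu_map precisely so it copes with sparse numbering), and the worker body is a placeholder:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NR_WORKERS 8

static void *worker_fn(void *arg)
{
	/* Placeholder: the real benchmarks block on a futex here. */
	printf("worker %ld running on cpu %d\n", (long)arg, sched_getcpu());
	return NULL;
}

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	size_t size = CPU_ALLOC_SIZE(ncpus);
	cpu_set_t *cpuset = CPU_ALLOC(ncpus);
	pthread_t w[NR_WORKERS];
	pthread_attr_t attr;

	if (!cpuset || ncpus < 1)
		return 1;
	pthread_attr_init(&attr);

	for (long i = 0; i < NR_WORKERS; i++) {
		/* Round-robin: worker i is pinned to CPU (i mod ncpus). */
		CPU_ZERO_S(size, cpuset);
		CPU_SET_S(i % ncpus, size, cpuset);
		if (pthread_attr_setaffinity_np(&attr, size, cpuset) ||
		    pthread_create(&w[i], &attr, worker_fn, (void *)i)) {
			fprintf(stderr, "failed to create worker %ld\n", i);
			exit(1);
		}
	}
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(w[i], NULL);

	pthread_attr_destroy(&attr);
	CPU_FREE(cpuset);
	return 0;
}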
/linux-master/tools/testing/selftests/ring-buffer/
  map_test.c
     79  int tracefs_cpu_map(struct tracefs_cpu_map_desc *desc, int cpu)
     86  TRACEFS_ROOT"/per_cpu/cpu%d/trace_pipe_raw",
     87  cpu) < 0)
    129  int cpu = sched_getcpu();
    147  ASSERT_GE(cpu, 0);
    153  ASSERT_EQ(tracefs_cpu_map(&self->map_desc, cpu), 0);
    159  CPU_SET(cpu, &cpu_mask);
    273  int cpu = sched_getcpu();
    275  ASSERT_GE(cpu, 0);
    278  ASSERT_EQ(tracefs_cpu_map(&map_desc, cpu), ...
    284  int cpu = sched_getcpu();
    ...
/linux-master/mm/
  mmap_lock.c
     59  int cpu;
     62  for_each_possible_cpu(cpu) {
     63  memcg_path = per_cpu_ptr(&memcg_paths, cpu);
     73  for_each_possible_cpu(cpu) {
     83  int cpu;
     97  for_each_possible_cpu(cpu) {
    101  rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
    192  * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
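
mmap_lock.c keeps one buffer per possible CPU so tracepoint writers never take a lock; the only requirement (per the line-192 comment) is that preemption stays disabled while a CPU uses its own slot. A minimal kernel-module-style sketch of that per-CPU allocation pattern, assuming a kernel build tree; the structure name and buffer size are illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>

#define DEMO_BUF_SIZE 256

struct demo_pcpu {
	char *buf;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_paths);

static void demo_use(void)
{
	/* get_cpu_ptr() disables preemption, so we stay on this CPU's slot. */
	struct demo_pcpu *p = get_cpu_ptr(&demo_paths);

	snprintf(p->buf, DEMO_BUF_SIZE, "hello from cpu %d", smp_processor_id());
	pr_info("%s\n", p->buf);
	put_cpu_ptr(&demo_paths);
}

static int __init demo_init(void)
{
	int cpu;

	/* One buffer per possible CPU: writers never contend on a lock. */
	for_each_possible_cpu(cpu) {
		struct demo_pcpu *p = per_cpu_ptr(&demo_paths, cpu);

		p->buf = kzalloc(DEMO_BUF_SIZE, GFP_KERNEL);
		if (!p->buf)
			goto free_all;
	}
	demo_use();
	return 0;

free_all:
	for_each_possible_cpu(cpu)
		kfree(per_cpu_ptr(&demo_paths, cpu)->buf);
	return -ENOMEM;
}

static void __exit demo_exit(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		kfree(per_cpu_ptr(&demo_paths, cpu)->buf);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");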
/linux-master/kernel/irq/
  ipi-mux.c
     10  #include <linux/cpu.h>
     27  static void (*ipi_mux_send)(unsigned int cpu);
     59  int cpu;
     61  for_each_cpu(cpu, mask) {
     62  icpu = per_cpu_ptr(ipi_mux_pcpu, cpu);
     87  ipi_mux_send(cpu);
    154  int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu))
/linux-master/drivers/cpuidle/
  cpuidle-qcom-spm.c
     52  * cpu when we intended only to gate the cpu clock.
     87  static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
     94  cpu_node = of_cpu_device_node_get(cpu);
    117  data->cpuidle_driver.cpumask = (struct cpumask *)cpumask_of(cpu);
    129  int cpu, ret;
    138  for_each_possible_cpu(cpu) {
    139  ret = spm_cpuidle_register(&pdev->dev, cpu);
    142  "Cannot register for CPU%d: %d\n", cpu, ret);
/linux-master/arch/mips/kernel/
  smp-cps.c
      7  #include <linux/cpu.h>
    382  static int cps_boot_secondary(int cpu, struct task_struct *idle)
    384  unsigned core = cpu_core(&cpu_data[cpu]);
    385  unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
    392  if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
    399  atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
    415  if (!cpus_are_siblings(cpu, smp_processor_id())) {
    418  if (!cpus_are_siblings(cpu, remote))
    425  core, cpu);
    497  unsigned int cpu, cor...
    543  unsigned cpu = smp_processor_id();
    564  unsigned int cpu;
    598  unsigned cpu = (unsigned long)ptr_cpu;
    611  cps_cpu_die(unsigned int cpu)
    613  cps_cleanup_dead_cpu(unsigned cpu)
    ...
/linux-master/include/linux/
  timer.h
    150  extern void add_timer_on(struct timer_list *timer, int cpu);
    209  unsigned long __round_jiffies(unsigned long j, int cpu);
    210  unsigned long __round_jiffies_relative(unsigned long j, int cpu);
    214  unsigned long __round_jiffies_up(unsigned long j, int cpu);
    215  unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
    220  int timers_prepare_cpu(unsigned int cpu);
    221  int timers_dead_cpu(unsigned int cpu);
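
add_timer_on() queues a timer on a specific CPU's timer wheel instead of the local one. A minimal kernel-module-style sketch of its use, assuming a kernel build tree; the one-second delay and target CPU 0 are arbitrary choices:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *t)
{
	/* Runs in softirq context on the CPU the timer was queued on. */
	pr_info("demo timer fired on cpu %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	timer_setup(&demo_timer, demo_timer_fn, 0);
	demo_timer.expires = jiffies + HZ;	/* ~1 second from now */
	add_timer_on(&demo_timer, 0);		/* queue it on CPU 0 */
	return 0;
}

static void __exit demo_exit(void)
{
	timer_delete_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");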
/linux-master/kernel/
  softirq.c
     20  #include <linux/cpu.h>
     41  - Even if softirq is serialized, only local cpu is marked for
     42  execution. Hence, we get something sort of weak cpu binding.
    617  int cpu = smp_processor_id();
    620  if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
    902  int cpu;
    904  for_each_possible_cpu(cpu) {
    905  per_cpu(tasklet_vec, cpu).tail =
    906  &per_cpu(tasklet_vec, cpu)...
    915  ksoftirqd_should_run(unsigned int cpu)
    920  run_ksoftirqd(unsigned int cpu)
    937  takeover_tasklets(unsigned int cpu)
    ...