Searched refs:this_cpu (Results 26 - 50 of 62) sorted by relevance

/linux-master/drivers/thermal/intel/
therm_throt.c
303 unsigned int i, avg, this_cpu = smp_processor_id(); local
313 this_cpu,
349 this_cpu,
359 schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
376 unsigned int this_cpu = smp_processor_id(); local
379 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
423 schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
438 unsigned int this_cpu = smp_processor_id(); local
439 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
/linux-master/kernel/
smp.c
608 int this_cpu; local
615 this_cpu = get_cpu();
623 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
761 int cpu, last_cpu, this_cpu = smp_processor_id(); local
776 if (cpu_online(this_cpu) && !oops_in_progress &&
789 if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
794 if (cpu == this_cpu)
802 __cpumask_clear_cpu(this_cpu, cfd->cpumask);
842 if (run_local && (!cond_func || cond_func(this_cpu, info))) {
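
The smp.c hits at 608–842 come from the kernel's cross-call path: run a function on every CPU in a mask, treating the caller's CPU specially. A compressed sketch of just that control flow (the IPI queueing is omitted; run_on_mask is a made-up name):

#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/irqflags.h>
#include <linux/smp.h>

static void run_on_mask(const struct cpumask *mask,
			void (*func)(void *), void *info)
{
	int cpu, this_cpu;

	this_cpu = get_cpu();	/* disables preemption: this_cpu stays valid */

	/* Sending IPIs with interrupts off from an online CPU risks deadlock. */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled());

	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		/* ... queue func for the remote @cpu here (omitted) ... */
	}

	/* The caller's CPU never gets an IPI; it just calls func directly. */
	if (cpumask_test_cpu(this_cpu, mask))
		func(info);

	put_cpu();
}
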
crash_core.c
129 int old_cpu, this_cpu; local
137 this_cpu = raw_smp_processor_id();
139 if (atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu)) {
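
crash_core.c (and mshyperv.c further down) elect a single CPU to own a panic with one compare-and-exchange: whoever swings panic_cpu from the invalid value to its own id wins; everyone else backs off. The idiom, as a sketch:

#include <linux/atomic.h>
#include <linux/smp.h>

#define PANIC_CPU_INVALID	-1	/* mirrors the kernel's constant */

static atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

static bool try_become_panic_cpu(void)
{
	int old_cpu = PANIC_CPU_INVALID;
	/* raw_ skips the preemption-debug check, which matters in panic/NMI context. */
	int this_cpu = raw_smp_processor_id();

	/* True only for the first CPU to get here; the rest see a valid owner. */
	return atomic_try_cmpxchg(&panic_cpu, &old_cpu, this_cpu);
}
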
watchdog.c
153 unsigned int this_cpu = smp_processor_id(); local
182 if (cpu == this_cpu) {
/linux-master/arch/arm/mm/
context.c
51 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, argument
61 if (cpu == this_cpu)
/linux-master/arch/arm/kernel/
machine_kexec.c
105 int cpu, this_cpu = raw_smp_processor_id(); local
112 if (cpu == this_cpu)
/linux-master/arch/x86/lib/
msr-smp.c
102 int this_cpu; local
109 this_cpu = get_cpu();
111 if (cpumask_test_cpu(this_cpu, mask))
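
The msr-smp.c hits show the standard fan-out bracket: pin with get_cpu(), service the local CPU directly if it is in the mask, and let smp_call_function_many() (which never targets the calling CPU) cover the rest. A sketch with an illustrative callback:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void remote_op(void *info)
{
	/* Runs on each targeted CPU; msr-smp.c reads or writes an MSR here. */
}

static void op_on_cpus(const struct cpumask *mask, void *info)
{
	int this_cpu = get_cpu();	/* pin: no migration until put_cpu() */

	/* Local CPU: call directly, no IPI round-trip needed. */
	if (cpumask_test_cpu(this_cpu, mask))
		remote_op(info);

	/* Remote CPUs: smp_call_function_many() skips the caller by design. */
	smp_call_function_many(mask, remote_op, info, 1);

	put_cpu();
}
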
/linux-master/arch/sparc/kernel/
nmi.c
75 int this_cpu = smp_processor_id(); local
82 panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
84 WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
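
The two nmi.c hits are the two arms of one branch: the sparc hardlockup detector either panics or merely warns with the offending CPU id, depending on the hardlockup_panic sysctl. Roughly:

#include <linux/bug.h>
#include <linux/kernel.h>

extern unsigned int hardlockup_panic;	/* the real sysctl flag; extern here for the sketch */

static void report_hard_lockup(int this_cpu)
{
	if (hardlockup_panic)
		panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
	else
		WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
}
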
chmc.c
593 unsigned long ret, this_cpu; local
597 this_cpu = real_hard_smp_processor_id();
599 if (p->portid == this_cpu) {
entry.h
250 void sun4v_register_mondo_queues(int this_cpu);
/linux-master/arch/x86/hyperv/
hv_apic.c
160 int cur_cpu, vcpu, this_cpu = smp_processor_id(); local
175 (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
204 if (exclude_self && cur_cpu == this_cpu)
/linux-master/arch/arm64/kernel/
smp.c
722 unsigned int this_cpu; local
726 this_cpu = smp_processor_id();
727 store_cpu_topology(this_cpu);
728 numa_store_cpu_info(this_cpu);
729 numa_add_cpu(this_cpu);
871 int this_cpu = raw_smp_processor_id(); local
876 if (cpu == this_cpu)
/linux-master/arch/x86/kernel/cpu/
common.c
196 static const struct cpu_dev *this_cpu = &default_cpu; variable in typeref:struct:cpu_dev
689 if (!this_cpu)
692 info = this_cpu->legacy_models;
833 if (this_cpu->legacy_cache_size)
834 l2size = this_cpu->legacy_cache_size(c, l2size);
857 if (this_cpu->c_detect_tlb)
858 this_cpu->c_detect_tlb(c);
882 this_cpu = cpu_devs[i];
883 c->x86_vendor = this_cpu->c_x86_vendor;
892 this_cpu
[all...]
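
Note that in common.c this_cpu is not a CPU number at all: it is a file-scope pointer to the detected vendor's struct cpu_dev, through which optional callbacks are dispatched (hits 833–834 and 857–858). A trimmed-down stand-in for that shape:

#include <linux/types.h>

struct cpu_dev {				/* heavily abridged */
	int		c_x86_vendor;
	void		(*c_detect_tlb)(void *c);
	unsigned int	(*legacy_cache_size)(void *c, unsigned int size);
};

static const struct cpu_dev default_cpu = { };	/* safe fallbacks */
static const struct cpu_dev *this_cpu = &default_cpu;

static unsigned int cache_size(void *c, unsigned int l2size)
{
	/* Optional vendor hook: call it only if the vendor provides one. */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);
	return l2size;
}
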
mshyperv.c
267 unsigned int old_cpu, this_cpu; local
273 this_cpu = raw_smp_processor_id();
274 if (!atomic_try_cmpxchg(&nmi_cpu, &old_cpu, this_cpu))
/linux-master/kernel/debug/kdb/
kdb_io.c
620 int this_cpu, old_cpu; local
630 this_cpu = smp_processor_id();
632 old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu);
633 if (old_cpu == -1 || old_cpu == this_cpu)
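
kdb_io.c reuses the cmpxchg election from the panic path, with one twist visible at 633: the current owner may re-enter, so a recursive kdb printf on the same CPU does not deadlock against itself. Sketch:

#include <linux/atomic.h>
#include <linux/processor.h>
#include <linux/smp.h>

static int printf_lock_cpu = -1;	/* -1 means unowned, as with kdb_printf_cpu */

static void printf_lock(void)		/* callers run with preemption off */
{
	int this_cpu = smp_processor_id();
	int old_cpu;

	for (;;) {
		old_cpu = cmpxchg(&printf_lock_cpu, -1, this_cpu);
		/* Took the lock, or already owned it (recursion): proceed. */
		if (old_cpu == -1 || old_cpu == this_cpu)
			break;
		cpu_relax();
	}
}
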
/linux-master/arch/x86/xen/
smp.c
236 unsigned int this_cpu = smp_processor_id(); local
243 if (this_cpu == cpu)
/linux-master/drivers/cpuidle/
coupled.c
342 * @this_cpu: target cpu
347 static void cpuidle_coupled_poke_others(int this_cpu, argument
353 if (cpu != this_cpu && cpu_online(cpu))
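
coupled.c's poke loop at 353 is the generic "notify everyone but me" walk over a cpumask. In isolation:

#include <linux/cpumask.h>

static void poke_cpu(int cpu)
{
	/* Illustrative: coupled.c sends a wake-up poke to @cpu here. */
}

static void poke_others(int this_cpu, const struct cpumask *coupled_mask)
{
	int cpu;

	for_each_cpu(cpu, coupled_mask)
		if (cpu != this_cpu && cpu_online(cpu))
			poke_cpu(cpu);
}
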
/linux-master/tools/perf/
builtin-sched.c
1551 struct perf_cpu this_cpu = { local
1559 BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
1561 if (this_cpu.cpu > sched->max_cpu.cpu)
1562 sched->max_cpu = this_cpu;
1566 if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
1567 sched->map.comp_cpus[cpus_nr++] = this_cpu;
1573 timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
1574 sched->cpu_last_switched[this_cpu.cpu] = timestamp;
1595 sched->curr_thread[this_cpu
1692 int this_cpu = sample->cpu, err = 0; local
2937 struct perf_cpu this_cpu = { local
[all...]
/linux-master/kernel/sched/
rt.c
1829 int this_cpu = smp_processor_id(); local
1873 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1874 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1882 * "this_cpu" is cheaper to preempt than a
1885 if (this_cpu != -1 &&
1886 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1888 return this_cpu;
1906 if (this_cpu != -1)
1907 return this_cpu;
2302 int this_cpu = this_rq->cpu, cpu; local
[all...]
deadline.c
2224 int this_cpu = smp_processor_id(); local
2256 * Check if this_cpu is to be skipped (i.e., it is
2259 if (!cpumask_test_cpu(this_cpu, later_mask))
2260 this_cpu = -1;
2268 * If possible, preempting this_cpu is
2271 if (this_cpu != -1 &&
2272 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2274 return this_cpu;
2297 if (this_cpu != -1)
2298 return this_cpu;
2470 int this_cpu = this_rq->cpu, cpu; local
[all...]
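
rt.c and deadline.c apply the same heuristic when choosing where to push a task: this_cpu is preferred because preempting ourselves is cheaper than sending an IPI, but only if it is itself a candidate and lies inside the sched domain being examined; otherwise it is set to -1 and the optimisation is skipped. A condensed sketch (the real code walks the domain hierarchy; both masks are assumed precomputed here):

#include <linux/cpumask.h>
#include <linux/smp.h>

static int pick_push_target(const struct cpumask *candidate_mask,
			    const struct cpumask *domain_span)
{
	int this_cpu = smp_processor_id();

	/* Skip the this_cpu optimisation if we are not a candidate. */
	if (!cpumask_test_cpu(this_cpu, candidate_mask))
		this_cpu = -1;

	/* Cache-hot and inside this domain: preempt ourselves. */
	if (this_cpu != -1 && cpumask_test_cpu(this_cpu, domain_span))
		return this_cpu;

	/* Otherwise any candidate inside the domain will do. */
	return cpumask_first_and(candidate_mask, domain_span);
}
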
fair.c
6010 int this_cpu = smp_processor_id(); local
6051 if (cpu_of(rq) != this_cpu) {
7023 wake_affine_idle(int this_cpu, int prev_cpu, int sync) argument
7026 * If this_cpu is idle, it implies the wakeup is from interrupt
7037 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
7038 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
7040 if (sync && cpu_rq(this_cpu)->nr_running == 1)
7041 return this_cpu;
7050 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) argument
7092 wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) argument
7119 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) argument
10319 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) argument
11259 load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing) argument
12100 int this_cpu = this_rq->cpu; local
12244 int this_cpu = this_rq->cpu; local
12292 int this_cpu = this_rq->cpu; local
[all...]
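
The wake_affine_idle() hits (7023–7041) reconstruct nearly the whole function, so it is worth reading as one piece: stay on prev_cpu when both CPUs are idle and share a cache (migrating buys nothing), and take this_cpu on a sync wakeup when the waker is the lone runnable task. Reassembled below; cpu_rq() is a kernel/sched-internal accessor, and the no-preference return value follows current kernels:

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>

static int wake_affine_idle(int this_cpu, int prev_cpu, int sync)
{
	/*
	 * If this_cpu is idle, the wakeup is effectively from interrupt
	 * context; an idle sibling sharing cache is as good as this_cpu,
	 * and an idle prev_cpu is better still: no migration at all.
	 */
	if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
		return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;

	/*
	 * A sync wakeup promises the waker sleeps soon; if it is the only
	 * runnable task here, this CPU is about to be free.
	 */
	if (sync && cpu_rq(this_cpu)->nr_running == 1)
		return this_cpu;

	return nr_cpumask_bits;		/* no preference; caller falls back */
}
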
/linux-master/tools/testing/selftests/kvm/x86_64/
hyperv_tlb_flush.c
78 u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64)); local
87 expected = READ_ONCE(*this_cpu);
116 if (expected != READ_ONCE(*this_cpu))
/linux-master/arch/x86/kernel/
process.c
505 unsigned int this_cpu = smp_processor_id(); local
523 for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
524 if (cpu == this_cpu)
/linux-master/kernel/debug/
debug_core.c
244 int this_cpu = raw_smp_processor_id(); local
250 if (cpu == this_cpu)
/linux-master/arch/sparc/mm/
init_64.c
229 static inline void set_dcache_dirty(struct folio *folio, int this_cpu) argument
231 unsigned long mask = this_cpu;
299 int this_cpu = get_cpu(); local
304 if (cpu == this_cpu)
467 int this_cpu; local
479 this_cpu = get_cpu();
487 if (dirty_cpu == this_cpu)
491 set_dcache_dirty(folio, this_cpu);
