Lines matching refs: cpc (struct perf_cpu_pmu_context *, the per-CPU per-PMU perf context) in kernel/events/core.c

451 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc);
1083 struct perf_cpu_pmu_context *cpc;
1088 cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
1089 rotations = perf_rotate_context(cpc);
1091 raw_spin_lock(&cpc->hrtimer_lock);
1093 hrtimer_forward_now(hr, cpc->hrtimer_interval);
1095 cpc->hrtimer_active = 0;
1096 raw_spin_unlock(&cpc->hrtimer_lock);
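
Lines 1083-1096 are the body of the per-PMU multiplexing hrtimer handler. A hedged sketch of how the fragments fit together; the function name and the if/else around the forward/deactivate pair are inferred from the fragment ordering, not shown in the listing:

    static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
    {
            struct perf_cpu_pmu_context *cpc;
            bool rotations;

            cpc = container_of(hr, struct perf_cpu_pmu_context, hrtimer);
            rotations = perf_rotate_context(cpc);

            raw_spin_lock(&cpc->hrtimer_lock);
            if (rotations)
                    hrtimer_forward_now(hr, cpc->hrtimer_interval);
            else
                    cpc->hrtimer_active = 0;  /* nothing left to rotate */
            raw_spin_unlock(&cpc->hrtimer_lock);

            return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
    }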
1101 static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
1103 struct hrtimer *timer = &cpc->hrtimer;
1104 struct pmu *pmu = cpc->epc.pmu;
1115 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1117 raw_spin_lock_init(&cpc->hrtimer_lock);
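
Lines 1101-1117 initialize that timer per CPU. A sketch under the assumption that the interval comes from pmu->hrtimer_interval_ms with a fallback default and that the timer is pinned to its CPU; neither detail is visible in the listing:

    static void __perf_mux_hrtimer_init(struct perf_cpu_pmu_context *cpc, int cpu)
    {
            struct hrtimer *timer = &cpc->hrtimer;
            struct pmu *pmu = cpc->epc.pmu;
            u64 interval;

            /* assumed: fall back to a sane default interval */
            interval = pmu->hrtimer_interval_ms;
            if (interval < 1)
                    interval = 1;

            cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

            raw_spin_lock_init(&cpc->hrtimer_lock);
            hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
            timer->function = perf_mux_hrtimer_handler;
    }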
1122 static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
1124 struct hrtimer *timer = &cpc->hrtimer;
1127 raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
1128 if (!cpc->hrtimer_active) {
1129 cpc->hrtimer_active = 1;
1130 hrtimer_forward_now(timer, cpc->hrtimer_interval);
1133 raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);
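
Lines 1122-1133 arm the timer only if it is not already running; hrtimer_active makes the call idempotent, so any path that wants multiplexing can invoke it unconditionally. A sketch; the hrtimer_start_expires() call and the return value are assumptions:

    static int perf_mux_hrtimer_restart(struct perf_cpu_pmu_context *cpc)
    {
            struct hrtimer *timer = &cpc->hrtimer;
            unsigned long flags;

            raw_spin_lock_irqsave(&cpc->hrtimer_lock, flags);
            if (!cpc->hrtimer_active) {
                    cpc->hrtimer_active = 1;
                    hrtimer_forward_now(timer, cpc->hrtimer_interval);
                    /* assumed: start at the freshly forwarded expiry */
                    hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
            }
            raw_spin_unlock_irqrestore(&cpc->hrtimer_lock, flags);

            return 0;
    }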
2257 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2260 // XXX cpc serialization, probably per-cpu IRQ disabled
2304 cpc->active_oncpu--;
2307 if (event->attr.exclusive || !cpc->active_oncpu)
2308 cpc->exclusive = 0;
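
Lines 2257-2308 are the cpc bookkeeping on the event_sched_out() path: taking a hardware event off the PMU decrements the active count, and removing an exclusive event (or the last active one) clears the exclusivity flag. A sketch of just that bookkeeping; the is_software_event() guard mirrors the sched-in path and is an assumption here:

    /* in event_sched_out(), IRQs off per the XXX note above */
    struct perf_event_pmu_context *epc = event->pmu_ctx;
    struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);

    if (!is_software_event(event))
            cpc->active_oncpu--;
    if (event->attr.exclusive || !cpc->active_oncpu)
            cpc->exclusive = 0;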
2375 struct perf_cpu_pmu_context *cpc;
2377 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
2378 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
2379 cpc->task_epc = NULL;
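
Lines 2375-2379 (repeated verbatim at 3246-3250) detach the cached task-side perf_event_pmu_context when a task context leaves the CPU. A minimal sketch with a hypothetical helper name, since the listing does not show the enclosing function:

    /* hypothetical name for the enclosing helper */
    static void perf_cpc_unlink_task_epc(struct perf_event_pmu_context *pmu_ctx)
    {
            struct perf_cpu_pmu_context *cpc;

            cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
            /* a cached task epc, if any, must be the one going away */
            WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
            cpc->task_epc = NULL;
    }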
2519 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2560 cpc->active_oncpu++;
2565 cpc->exclusive = 1;
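
Lines 2519-2565 are the mirror image on the event_sched_in() path; as above, the is_software_event() guard is an assumption implied by active_oncpu counting only hardware events:

    /* in event_sched_in() */
    if (!is_software_event(event))
            cpc->active_oncpu++;
    ...
    if (event->attr.exclusive)
            cpc->exclusive = 1;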
2625 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
2636 if (cpc->exclusive)
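
Lines 2625-2636 use that flag as a gate in group_can_go_on(): once an exclusive group is on the PMU, no other hardware group may be scheduled. A hedged sketch; the all-software early return and the final can_add_hw fall-through are assumptions based on the function's known role:

    static int group_can_go_on(struct perf_event *event, int can_add_hw)
    {
            struct perf_event_pmu_context *epc = event->pmu_ctx;
            struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);

            /* assumed: all-software groups can always go on */
            if (event->group_caps & PERF_EV_CAP_SOFTWARE)
                    return 1;
            /* an exclusive group already on blocks everyone else */
            if (cpc->exclusive)
                    return 0;
            return can_add_hw;
    }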
3246 struct perf_cpu_pmu_context *cpc;
3248 cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3249 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3250 cpc->task_epc = NULL;
3482 struct perf_cpu_pmu_context *cpc;
3485 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3487 if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
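
Line 3487 is the context-switch fast path for PMU callbacks: the pmu->sched_task() hook fires only if someone bumped sched_cb_usage (see 3603 below). A sketch assuming the check sits in a walk over the context's pmu_ctx_list; the loop itself is not in the listing:

    struct perf_event_pmu_context *pmu_ctx;
    struct perf_cpu_pmu_context *cpc;

    list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
            cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);

            if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
                    pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
    }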
3589 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3594 if (!--cpc->sched_cb_usage)
3595 list_del(&cpc->sched_cb_entry);
3601 struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
3603 if (!cpc->sched_cb_usage++)
3604 list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
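
Lines 3589-3604 are the reference-counted registration pair: the first perf_sched_cb_inc() on a CPU links the cpc into that CPU's sched_cb_list, the last perf_sched_cb_dec() unlinks it, so the context-switch path only ever walks PMUs that asked for callbacks. A sketch reduced to the counting shown in the listing:

    void perf_sched_cb_inc(struct pmu *pmu)
    {
            struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);

            if (!cpc->sched_cb_usage++)
                    list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
    }

    void perf_sched_cb_dec(struct pmu *pmu)
    {
            struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);

            if (!--cpc->sched_cb_usage)
                    list_del(&cpc->sched_cb_entry);
    }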
3618 static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
3623 pmu = cpc->epc.pmu;
3632 pmu->sched_task(cpc->task_epc, sched_in);
3643 struct perf_cpu_pmu_context *cpc;
3649 list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
3650 __perf_pmu_sched_task(cpc, sched_in);
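
Lines 3618-3650 deliver the callbacks at context switch: the per-CPU sched_cb_list is walked and each registered PMU's sched_task() hook is invoked with the task epc cached in cpc. A sketch; the perf_pmu_disable()/enable() bracketing around the hook is an assumption:

    static void __perf_pmu_sched_task(struct perf_cpu_pmu_context *cpc, bool sched_in)
    {
            struct pmu *pmu = cpc->epc.pmu;

            /* software PMUs do not register for this callback */
            if (WARN_ON_ONCE(!pmu->sched_task))
                    return;

            perf_pmu_disable(pmu);          /* assumed bracketing */
            pmu->sched_task(cpc->task_epc, sched_in);
            perf_pmu_enable(pmu);
    }

    /* caller, on the switching CPU: */
    list_for_each_entry(cpc, this_cpu_ptr(&sched_cb_list), sched_cb_entry)
            __perf_pmu_sched_task(cpc, sched_in);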
3719 struct perf_cpu_pmu_context *cpc;
3724 cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
3725 WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
3726 cpc->task_epc = pmu_ctx;
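
Lines 3719-3726 are the attach side of the task_epc caching, run when a task's pmu context is scheduled in; together with the WARN_ON_ONCE this enforces the invariant that at most one task epc per PMU is live on a CPU at a time. A sketch; the guard that skips pure CPU contexts is an assumption:

    /* on the task sched-in path */
    if (!pmu_ctx->ctx->task)
            return;         /* assumed: only task contexts are cached */

    cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
    WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
    cpc->task_epc = pmu_ctx;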
3853 struct perf_cpu_pmu_context *cpc;
3856 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
3857 perf_mux_hrtimer_restart(cpc);
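
Lines 3853-3857 kick multiplexing lazily: only when a group fails to fit on the PMU during sched-in does anything arm the rotation timer, via the idempotent perf_mux_hrtimer_restart() from 1122. A sketch; the rotate_necessary flag is an assumption about the surrounding branch:

    /* group did not fit: remember to rotate and arm the timer */
    event->pmu_ctx->rotate_necessary = 1;   /* assumed */
    cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
    perf_mux_hrtimer_restart(cpc);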
4264 static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
4277 cpu_epc = &cpc->epc;
4279 task_epc = cpc->task_epc;
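
Lines 4264-4279 are the prologue of perf_rotate_context(), the timer-driven half: it rotates the flexible event lists of both the CPU's own epc (embedded in cpc) and the cached task epc, so starved groups eventually get PMU time. Only the two epc lookups appear in the listing; the rest is elided:

    static bool perf_rotate_context(struct perf_cpu_pmu_context *cpc)
    {
            struct perf_event_pmu_context *cpu_epc, *task_epc;

            cpu_epc = &cpc->epc;      /* CPU-wide events for this PMU */
            task_epc = cpc->task_epc; /* current task's events, if any */
            ...
    }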
4861 struct perf_cpu_pmu_context *cpc;
4863 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
4864 epc = &cpc->epc;
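
Lines 4861-4864 show why cpc embeds an epc at all: for CPU-scoped events the perf_event_pmu_context is not allocated but taken straight from the target CPU's cpc. A sketch of that branch; the refcount and context linking are assumptions, elided here:

    if (!ctx->task) {
            struct perf_cpu_pmu_context *cpc;

            cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
            epc = &cpc->epc;    /* embedded, never freed separately */
            /* assumed: take a reference and link epc into ctx */
            ...
            return epc;
    }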
11416 struct perf_cpu_pmu_context *cpc;
11417 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11418 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
11420 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
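
Lines 11416-11420 are the sysfs perf_event_mux_interval_ms store path: the new interval is written into every CPU's cpc, and an IPI re-arms each timer on its own CPU because the hrtimer is CPU-pinned. A sketch assuming the surrounding per-CPU loop:

    /* assumed: under cpus_read_lock() */
    for_each_online_cpu(cpu) {
            struct perf_cpu_pmu_context *cpc;

            cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
            cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

            /* restart must run on the target CPU: the timer is pinned */
            cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
    }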
11554 struct perf_cpu_pmu_context *cpc;
11556 cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
11557 __perf_init_event_pmu_context(&cpc->epc, pmu);
11558 __perf_mux_hrtimer_init(cpc, cpu);
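
Lines 11554-11558 close the loop at PMU registration: every possible CPU's cpc gets its embedded epc initialized and its multiplexing timer set up, so all of the paths above see valid state before the first event is created. A sketch assuming a for_each_possible_cpu() loop around the two calls shown:

    for_each_possible_cpu(cpu) {
            struct perf_cpu_pmu_context *cpc;

            cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
            __perf_init_event_pmu_context(&cpc->epc, pmu);
            __perf_mux_hrtimer_init(cpc, cpu);
    }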