Lines matching references to pmu in KVM's Intel vPMU code (pmu_intel.c)

21 #include "pmu.h"
37 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
40 u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
43 pmu->fixed_ctr_ctrl = data;
44 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
51 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
53 __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
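
The reprogram_fixed_counters() matches above show KVM caching the guest's IA32_FIXED_CTR_CTRL write and walking every architectural fixed counter. A minimal, self-contained sketch of the 4-bit-per-counter control layout those lines rely on (the helper and the sample values are illustrative, not KVM code; the field layout itself is architectural):

#include <stdint.h>
#include <stdio.h>

/*
 * IA32_FIXED_CTR_CTRL gives each fixed counter a 4-bit control field:
 * bit 0 enables ring-0 counting, bit 1 enables ring-3 counting and
 * bit 3 enables the overflow PMI (bit 2 is AnyThread, ignored here).
 */
static inline uint8_t fixed_ctrl_field(uint64_t fixed_ctr_ctrl, int idx)
{
    return (fixed_ctr_ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
    uint64_t old_ctrl = 0x000;          /* everything off           */
    uint64_t new_ctrl = 0x0b0;          /* enable fixed counter 1   */
    int nr_fixed = 3;

    for (int i = 0; i < nr_fixed; i++) {
        uint8_t o = fixed_ctrl_field(old_ctrl, i);
        uint8_t n = fixed_ctrl_field(new_ctrl, i);

        /* Only counters whose control field changed need reprogramming. */
        if (o != n)
            printf("fixed counter %d: ctrl 0x%x -> 0x%x\n", i,
                   (unsigned)o, (unsigned)n);
    }
    return 0;
}
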
62 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
77 if (WARN_ON_ONCE(!pmu->version))
90 counters = pmu->fixed_counters;
91 num_counters = pmu->nr_arch_fixed_counters;
92 bitmask = pmu->counter_bitmask[KVM_PMC_FIXED];
95 counters = pmu->gp_counters;
96 num_counters = pmu->nr_arch_gp_counters;
97 bitmask = pmu->counter_bitmask[KVM_PMC_GP];
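
These lines come from the RDPMC path: the guest's ECX value picks either the fixed-counter or the GP-counter array, the index is bounds-checked against the vCPU model, and the value handed back is truncated to the counter's bit width. A hedged sketch of that selection, assuming the architectural convention that ECX bit 30 selects the fixed-counter space (toy_pmu and toy_rdpmc are stand-ins, not kvm_pmu):

#include <stdint.h>

#define RDPMC_FIXED_BIT (1u << 30)      /* ECX bit 30: fixed-counter space */

struct toy_pmu {
    uint64_t gp_counters[8];
    uint64_t fixed_counters[3];
    uint64_t gp_bitmask;                /* e.g. (1ull << 48) - 1 */
    uint64_t fixed_bitmask;
    unsigned int nr_gp, nr_fixed;
};

/* Return the masked counter value, or 0 for an out-of-range index. */
uint64_t toy_rdpmc(const struct toy_pmu *pmu, uint32_t ecx)
{
    const uint64_t *counters;
    uint64_t bitmask;
    unsigned int num;
    unsigned int idx = ecx & ~RDPMC_FIXED_BIT;

    if (ecx & RDPMC_FIXED_BIT) {
        counters = pmu->fixed_counters;
        num      = pmu->nr_fixed;
        bitmask  = pmu->fixed_bitmask;
    } else {
        counters = pmu->gp_counters;
        num      = pmu->nr_gp;
        bitmask  = pmu->gp_bitmask;
    }

    if (idx >= num)
        return 0;                       /* the real code rejects the access instead */
    return counters[idx] & bitmask;
}
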
124 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
126 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
129 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
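
get_fw_gp_pmc() handles the full-width counter aliases: each GP counter is reachable through the legacy IA32_PERFCTRx MSRs and, when full-width writes are exposed to the guest, also through the IA32_PMCx range. A small sketch of the dual-range lookup (the MSR base values are the architectural ones; the functions and NR_GP_COUNTERS are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_PERFCTR0   0x000000c1  /* legacy, 32-bit write width   */
#define MSR_IA32_PMC0       0x000004c1  /* full-width writable alias    */
#define NR_GP_COUNTERS      8

/* Map an MSR index to a GP counter slot, or -1 if it is not in the range. */
int gp_counter_index(uint32_t msr, uint32_t base)
{
    if (msr >= base && msr < base + NR_GP_COUNTERS)
        return (int)(msr - base);
    return -1;
}

int fw_gp_counter_index(uint32_t msr, bool fw_writes_enabled)
{
    if (!fw_writes_enabled)
        return -1;                      /* alias range hidden from the guest */
    return gp_counter_index(msr, MSR_IA32_PMC0);
}
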
152 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
158 return kvm_pmu_has_perf_global_ctrl(pmu);
171 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
172 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
173 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
183 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
186 pmc = get_fixed_pmc(pmu, msr);
187 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
188 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
207 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
239 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
251 pmu->event_count++;
252 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
300 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
306 msr_info->data = pmu->fixed_ctr_ctrl;
309 msr_info->data = pmu->pebs_enable;
312 msr_info->data = pmu->ds_area;
315 msr_info->data = pmu->pebs_data_cfg;
318 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
319 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
322 val & pmu->counter_bitmask[KVM_PMC_GP];
324 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
327 val & pmu->counter_bitmask[KVM_PMC_FIXED];
329 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
343 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
351 if (data & pmu->fixed_ctr_ctrl_mask)
354 if (pmu->fixed_ctr_ctrl != data)
355 reprogram_fixed_counters(pmu, data);
358 if (data & pmu->pebs_enable_mask)
361 if (pmu->pebs_enable != data) {
362 diff = pmu->pebs_enable ^ data;
363 pmu->pebs_enable = data;
364 reprogram_counters(pmu, diff);
371 pmu->ds_area = data;
374 if (data & pmu->pebs_data_cfg_mask)
377 pmu->pebs_data_cfg = data;
380 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
381 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
383 (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
391 } else if ((pmc = get_fixed_pmc(pmu, msr))) {
394 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
395 reserved_bits = pmu->reserved_bits;
397 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
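
The intel_pmu_set_msr() matches repeat one pattern per control MSR: bits the vCPU model does not support live in a per-MSR reserved mask, a write touching any of them is rejected, and an accepted write updates the cached value and reprograms only the counters whose enable bit actually changed. A stripped-down sketch of that pattern for a PEBS_ENABLE-like register (the field names are illustrative, not KVM's):

#include <stdbool.h>
#include <stdint.h>

struct toy_pmu {
    uint64_t pebs_enable;
    uint64_t pebs_enable_rsvd;  /* set bits = not writable by the guest */
};

/* Returns true if the write was accepted, false if it should fault. */
bool toy_set_pebs_enable(struct toy_pmu *pmu, uint64_t data)
{
    uint64_t diff;

    if (data & pmu->pebs_enable_rsvd)
        return false;           /* reserved bit set: reject the write */

    if (pmu->pebs_enable != data) {
        diff = pmu->pebs_enable ^ data;
        pmu->pebs_enable = data;
        /*
         * In KVM, reprogram_counters(pmu, diff) runs at this point so
         * only counters whose enable bit flipped are touched.
         */
        (void)diff;
    }
    return true;
}
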
453 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
479 pmu->version = eax.split.version_id;
480 if (!pmu->version)
483 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
487 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
490 pmu->available_event_types = ~entry->ebx &
493 if (pmu->version == 1) {
494 pmu->nr_arch_fixed_counters = 0;
496 pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
500 pmu->counter_bitmask[KVM_PMC_FIXED] =
504 for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
505 pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
506 counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
507 (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
508 pmu->global_ctrl_mask = counter_mask;
515 pmu->global_status_mask = pmu->global_ctrl_mask
519 pmu->global_status_mask &=
526 pmu->reserved_bits ^= HSW_IN_TX;
527 pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
530 bitmap_set(pmu->all_valid_pmc_idx,
531 0, pmu->nr_arch_gp_counters);
532 bitmap_set(pmu->all_valid_pmc_idx,
533 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
543 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
547 pmu->pebs_enable_mask = counter_mask;
548 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
549 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
550 pmu->fixed_ctr_ctrl_mask &=
553 pmu->pebs_data_cfg_mask = ~0xff00000full;
555 pmu->pebs_enable_mask =
556 ~((1ull << pmu->nr_arch_gp_counters) - 1);
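
The intel_pmu_refresh() lines turn CPUID leaf 0xA into limits and masks: EAX reports the PMU version, the number of GP counters and their bit width, EDX the fixed-counter count, and from those KVM builds counter_bitmask plus a mask of valid IA32_PERF_GLOBAL_CTRL bits (GP counters in the low bits, fixed counters from bit 32 up). A compact sketch of that derivation with hypothetical CPUID values:

#include <stdint.h>
#include <stdio.h>

#define FIXED_PMC_BASE_IDX  32  /* fixed counters start at bit 32 of GLOBAL_CTRL */

struct toy_caps {
    unsigned int version, nr_gp, gp_bit_width, nr_fixed, fixed_bit_width;
};

int main(void)
{
    /* Values as CPUID.0AH might report them on a recent core (hypothetical). */
    struct toy_caps c = { .version = 5, .nr_gp = 8, .gp_bit_width = 48,
                          .nr_fixed = 4, .fixed_bit_width = 48 };

    uint64_t gp_bitmask    = (1ull << c.gp_bit_width) - 1;
    uint64_t fixed_bitmask = (1ull << c.fixed_bit_width) - 1;

    /* Bits that are valid in IA32_PERF_GLOBAL_CTRL ... */
    uint64_t valid = ((1ull << c.nr_gp) - 1) |
                     (((1ull << c.nr_fixed) - 1) << FIXED_PMC_BASE_IDX);
    /* ... and their complement, the reserved mask writes are checked against. */
    uint64_t global_ctrl_rsvd = ~valid;

    printf("gp mask %#llx, fixed mask %#llx, global_ctrl reserved %#llx\n",
           (unsigned long long)gp_bitmask, (unsigned long long)fixed_bitmask,
           (unsigned long long)global_ctrl_rsvd);
    return 0;
}
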
564 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
568 pmu->gp_counters[i].type = KVM_PMC_GP;
569 pmu->gp_counters[i].vcpu = vcpu;
570 pmu->gp_counters[i].idx = i;
571 pmu->gp_counters[i].current_config = 0;
575 pmu->fixed_counters[i].type = KVM_PMC_FIXED;
576 pmu->fixed_counters[i].vcpu = vcpu;
577 pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
578 pmu->fixed_counters[i].current_config = 0;
579 pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);
593 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
661 * pmu resources (e.g. LBR) that were assigned to the guest. This is
665 * confirm that the pmu features enabled to the guest are not reclaimed
671 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
678 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
685 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
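
The cleanup-path matches and the comment above them describe a pre-entry double check: a higher-priority host perf user can reclaim a resource (such as the LBR event) that was lent to the guest, so KVM re-validates it before entering the guest and withdraws direct access when it is gone. A deliberately generic sketch of that idea, with toy types rather than KVM's lbr_desc and pmc_in_use machinery:

#include <stdbool.h>
#include <stdio.h>

struct toy_resource {
    bool lent_to_guest;     /* guest currently expects direct access     */
    bool still_owned;       /* host perf has not reclaimed it            */
};

/* Run with interrupts off, right before entering the guest. */
void toy_passthrough_check(struct toy_resource *res, int vcpu_id)
{
    if (!res->lent_to_guest)
        return;

    if (!res->still_owned) {
        /*
         * A higher-priority host user took the resource back: stop
         * exposing it and make the guest's accesses trap instead.
         */
        res->lent_to_guest = false;
        fprintf(stderr, "vcpu-%d: fail to pass the resource through\n", vcpu_id);
    }
}
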
702 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
707 kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
718 pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
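
intel_pmu_cross_mapped_check() walks only the counters whose enable bit is set in the guest's GLOBAL_CTRL and records, for each one, which hardware counter the host perf scheduler actually placed it on, accumulating the result in host_cross_mapped_mask. A small sketch of that bitmask-driven walk (the hw_idx field is a placeholder for perf's real counter assignment):

#include <stdint.h>

#define NR_PMCS 8

struct toy_pmc {
    int hw_idx;             /* counter the host scheduler assigned, or -1 */
};

struct toy_pmu {
    uint64_t global_ctrl;   /* guest enable bits, one per PMC */
    struct toy_pmc pmc[NR_PMCS];
    uint64_t host_cross_mapped_mask;
};

void toy_cross_mapped_check(struct toy_pmu *pmu)
{
    for (int bit = 0; bit < NR_PMCS; bit++) {
        if (!(pmu->global_ctrl & (1ull << bit)))
            continue;       /* guest has this counter disabled */

        int hw_idx = pmu->pmc[bit].hw_idx;
        if (hw_idx < 0 || hw_idx == bit)
            continue;       /* unscheduled, or not cross-mapped */

        pmu->host_cross_mapped_mask |= 1ull << hw_idx;
    }
}
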