Lines Matching defs:vcpu

123 kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
142 kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
197 guest_cpuid_is_intel(pmc->vcpu)) {
432 struct kvm *kvm = pmc->vcpu->kvm;
495 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
498 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
506 * other than the task that holds vcpu->mutex, take care to clear only
529 kvm_pmu_cleanup(vcpu);
532 int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
545 return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
559 static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
572 vcpu->kvm->arch.kvmclock_offset;
582 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
584 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
592 return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
594 pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
598 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
599 (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
600 kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
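(Taken together, the three checks above encode the architectural RDPMC permission rule: in protected mode, RDPMC at CPL > 0 is permitted only when CR4.PCE is set; otherwise the read is rejected.)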
607 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
609 if (lapic_in_kernel(vcpu)) {
610 static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
611 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
615 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
621 return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
625 return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
626 static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
629 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
631 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
632 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
638 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
640 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
657 return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
663 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
665 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
715 kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
716 return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
722 static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
724 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
743 static_call_cond(kvm_x86_pmu_reset)(vcpu);
751 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
753 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
755 if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
762 kvm_pmu_reset(vcpu);
778 if (!vcpu->kvm->arch.enable_pmu)
781 static_call(kvm_x86_pmu_refresh)(vcpu);
794 void kvm_pmu_init(struct kvm_vcpu *vcpu)
796 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
799 static_call(kvm_x86_pmu_init)(vcpu);
800 kvm_pmu_refresh(vcpu);
804 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
806 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
821 static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
826 void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
828 kvm_pmu_reset(vcpu);
860 return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
863 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
866 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
988 struct kvm_vcpu *vcpu;
1034 kvm_for_each_vcpu(i, vcpu, kvm)
1035 atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
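A pattern worth noting in the last two matches: writing -1ull into each vCPU's __reprogram_pmi bitmap marks every counter as needing reprogramming, and the comment at line 506 explains why the consumer side must clear only the bits it actually handles, since tasks other than the one holding vcpu->mutex may set bits concurrently. Below is a minimal user-space sketch of that claim-and-clear idiom, assuming C11 atomics; the names (reprogram_bitmap, request_all, handle_pending, NR_COUNTERS) are illustrative, not KVM's.

    /*
     * Minimal sketch of the claim-and-clear bitmap idiom, assuming
     * C11 atomics; names are illustrative, not KVM's.
     */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_COUNTERS 8

    static _Atomic uint64_t reprogram_bitmap;

    /* Producer: mark every counter as needing reprogramming (cf. -1ull). */
    static void request_all(void)
    {
        atomic_store(&reprogram_bitmap, ~0ull);
    }

    /*
     * Consumer: snapshot the pending bits, then clear only the bits
     * that are actually handled, so bits set concurrently by other
     * threads survive for the next pass.
     */
    static void handle_pending(void)
    {
        uint64_t pending = atomic_load(&reprogram_bitmap);

        for (int bit = 0; bit < NR_COUNTERS; bit++) {
            uint64_t mask = 1ull << bit;

            if (!(pending & mask))
                continue;
            atomic_fetch_and(&reprogram_bitmap, ~mask);
            printf("reprogram counter %d\n", bit);
        }
    }

    int main(void)
    {
        request_all();
        handle_pending();
        return 0;
    }

In the kernel, the producer side of this idiom is the atomic64_set(..., -1ull) visible above, and the consumer side is kvm_pmu_handle_event() (line 495), whose comment at line 506 spells out the clear-only-what-you-handle requirement.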