Lines Matching defs:vcpu

207 /* By default, the per-vcpu window doubles on every exit. */
211 /* By default, the per-vcpu window is reset to ple_window on every exit. */
396 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
416 if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
417 ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
418 (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
419 (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
420 (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
421 (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
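The condition at lines 416-421 is what makes vmx_update_fb_clear_dis() clear vmx->disable_fb_clear, i.e. KVM will not set FB_CLEAR_DIS around VM-entry, either because the guest relies on VERW clearing buffers (FB_CLEAR) or because it is told it is unaffected via all of the *_NO bits. A minimal restatement of that predicate as a standalone helper (the helper name is illustrative, not from the source):

    /* True when the guest's ARCH_CAPABILITIES make FB_CLEAR_DIS toggling moot. */
    static bool guest_fb_clear_dis_unneeded(u64 guest_arch_caps)
    {
            u64 no_bits = ARCH_CAP_MDS_NO | ARCH_CAP_TAA_NO | ARCH_CAP_PSDP_NO |
                          ARCH_CAP_FBSDP_NO | ARCH_CAP_SBDR_SSDP_NO;

            return (guest_arch_caps & ARCH_CAP_FB_CLEAR) ||
                   (guest_arch_caps & no_bits) == no_bits;
    }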
538 static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
541 hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
546 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
549 evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
669 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
671 return flexpriority_enabled && lapic_in_kernel(vcpu);
786 return; /* vcpu migration can race with cpu offline */
824 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
825 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
869 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
889 if ((vcpu->guest_debug &
893 if (to_vmx(vcpu)->rmode.vm86_active)
895 if (!vmx_need_pf_intercept(vcpu))
903 if (is_guest_mode(vcpu))
904 eb |= get_vmcs12(vcpu)->exception_bitmap;
928 if (vcpu->arch.xfd_no_write_intercept)
1101 u64 guest_efer = vmx->vcpu.arch.efer;
1126 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1187 static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1190 return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
1283 void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1285 struct vcpu_vmx *vmx = to_vmx(vcpu);
1312 nested_sync_vmcs12_to_shadow(vcpu);
1364 ++vmx->vcpu.stat.host_state_reload;
1414 void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1417 struct vcpu_vmx *vmx = to_vmx(vcpu);
1461 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1482 * Switches to the specified vcpu, until a matching vcpu_put(); assumes
1483 * the vcpu mutex is already taken.
1485 void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1487 struct vcpu_vmx *vmx = to_vmx(vcpu);
1489 vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1491 vmx_vcpu_pi_load(vcpu, cpu);
1496 void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1498 vmx_vcpu_pi_put(vcpu);
1500 vmx_prepare_switch_to_host(to_vmx(vcpu));
1503 bool vmx_emulation_required(struct kvm_vcpu *vcpu)
1505 return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1508 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1510 struct vcpu_vmx *vmx = to_vmx(vcpu);
1513 if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1514 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1526 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1528 struct vcpu_vmx *vmx = to_vmx(vcpu);
1536 if (is_unrestricted_guest(vcpu)) {
1537 kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1543 old_rflags = vmx_get_rflags(vcpu);
1552 vmx->emulation_required = vmx_emulation_required(vcpu);
1555 bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
1557 return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
1560 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1573 void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1589 static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1591 struct vcpu_vmx *vmx = to_vmx(vcpu);
1661 int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1671 if (to_vmx(vcpu)->exit_reason.enclave_mode) {
1672 kvm_queue_exception(vcpu, UD_VECTOR);
1678 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1680 union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
1718 orig_rip = kvm_rip_read(vcpu);
1726 if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1729 kvm_rip_write(vcpu, rip);
1731 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1737 vmx_set_interrupt_shadow(vcpu, 0);
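The odd-looking check on line 1726 detects a carry out of bit 31 when the instruction length is added to RIP, which only matters outside 64-bit mode, where the new RIP must wrap at 4 GiB. A small worked illustration (the values are hypothetical):

    u64 orig_rip = 0xffffffff;              /* e.g. a 1-byte instruction here */
    u64 rip = orig_rip + 1;                 /* 0x100000000 */

    /* Bits 31 and 32 both flipped, so (rip ^ orig_rip) >> 31 == 3. */
    if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
            rip = (u32)rip;                 /* truncate: RIP wraps to 0 */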
1746 void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1748 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1749 struct vcpu_vmx *vmx = to_vmx(vcpu);
1751 if (!is_guest_mode(vcpu))
1766 (!vcpu->arch.exception.pending ||
1767 vcpu->arch.exception.vector == DB_VECTOR) &&
1768 (!vcpu->arch.exception_vmexit.pending ||
1769 vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1771 kvm_make_request(KVM_REQ_EVENT, vcpu);
1777 int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1779 vmx_update_emulated_instruction(vcpu);
1780 return skip_emulated_instruction(vcpu);
1783 static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1791 if (kvm_hlt_in_guest(vcpu->kvm) &&
1796 void vmx_inject_exception(struct kvm_vcpu *vcpu)
1798 struct kvm_queued_exception *ex = &vcpu->arch.exception;
1800 struct vcpu_vmx *vmx = to_vmx(vcpu);
1802 kvm_deliver_exception_payload(vcpu, ex);
1822 inc_eip = vcpu->arch.event_exit_inst_len;
1823 kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
1831 vmx->vcpu.arch.event_exit_inst_len);
1838 vmx_clear_hlt(vcpu);
1868 load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1869 (vmx->vcpu.arch.efer & EFER_SCE);
1878 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1879 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
1896 u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1898 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1906 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1908 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1917 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
1919 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
1922 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1924 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
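TSC_OFFSET and TSC_MULTIPLIER together define the guest's view of the TSC; on VMX the multiplier is a fixed-point ratio with 48 fractional bits. A hedged sketch of the resulting relation (the helper is illustrative):

    /* guest_tsc = host_tsc * ratio (16.48 fixed point) + offset */
    static u64 guest_tsc_from_host(u64 host_tsc, u64 tsc_multiplier, u64 tsc_offset)
    {
            return mul_u64_u64_shr(host_tsc, tsc_multiplier, 48) + tsc_offset;
    }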
1982 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1984 struct vcpu_vmx *vmx = to_vmx(vcpu);
2001 return kvm_get_msr_common(vcpu, msr_info);
2004 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2015 !guest_has_spec_ctrl_msr(vcpu))
2018 msr_info->data = to_vmx(vcpu)->spec_ctrl;
2032 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2041 msr_info->data = vcpu->arch.mcg_ext_ctl;
2048 !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
2050 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
2054 if (!guest_can_use(vcpu, X86_FEATURE_VMX))
2067 if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
2068 nested_evmcs_filter_control_msr(vcpu, msr_info->index,
2127 return kvm_get_msr_common(vcpu, msr_info);
2133 static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2137 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
2143 static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
2148 (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2152 (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2163 int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2165 struct vcpu_vmx *vmx = to_vmx(vcpu);
2174 ret = kvm_set_msr_common(vcpu, msr_info);
2189 ret = kvm_set_msr_common(vcpu, msr_info);
2200 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
2202 vcpu->arch.xfd_no_write_intercept = true;
2203 vmx_update_exception_bitmap(vcpu);
2208 if (is_guest_mode(vcpu))
2209 get_vmcs12(vcpu)->guest_sysenter_cs = data;
2213 if (is_guest_mode(vcpu)) {
2214 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2215 get_vmcs12(vcpu)->guest_sysenter_eip = data;
2220 if (is_guest_mode(vcpu)) {
2221 data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2222 get_vmcs12(vcpu)->guest_sysenter_esp = data;
2229 invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
2231 kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
2239 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2241 get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2244 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
2246 intel_pmu_create_guest_lbr_event(vcpu);
2252 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2254 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
2258 if (is_guest_mode(vcpu) &&
2261 get_vmcs12(vcpu)->guest_bndcfgs = data;
2277 !guest_has_spec_ctrl_msr(vcpu))
2299 vmx_disable_intercept_for_msr(vcpu,
2305 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2311 ret = kvm_set_msr_common(vcpu, msr_info);
2315 if (is_guest_mode(vcpu) &&
2316 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2317 get_vmcs12(vcpu)->guest_ia32_pat = data;
2324 !(to_vmx(vcpu)->msr_ia32_feature_control &
2328 vcpu->arch.mcg_ext_ctl = data;
2336 vmx_leave_nested(vcpu);
2339 vmx_write_encls_bitmap(vcpu, NULL);
2354 (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
2364 if (!guest_can_use(vcpu, X86_FEATURE_VMX))
2366 return vmx_set_vmx_msr(vcpu, msr_index, data);
2369 vmx_rtit_ctl_check(vcpu, data) ||
2374 pt_update_intercept_for_msr(vcpu);
2399 if (!pt_output_base_valid(vcpu, data))
2419 if (is_noncanonical_address(data, vcpu))
2427 if (data && !vcpu_to_pmu(vcpu)->version)
2433 if (!cpuid_model_is_consistent(vcpu))
2440 if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
2442 if (!guest_cpuid_has(vcpu, X86_FEATURE_DTES64))
2444 if (!cpuid_model_is_consistent(vcpu))
2447 ret = kvm_set_msr_common(vcpu, msr_info);
2456 ret = kvm_set_msr_common(vcpu, msr_info);
2461 vmx_update_fb_clear_dis(vcpu, vmx);
2466 void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2470 kvm_register_mark_available(vcpu, reg);
2474 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2477 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2481 ept_save_pdptrs(vcpu);
2484 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2486 vcpu->arch.cr0 &= ~guest_owned_bits;
2487 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2494 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2495 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2498 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2500 vcpu->arch.cr4 &= ~guest_owned_bits;
2501 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2504 KVM_BUG_ON(1, vcpu->kvm);
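For CR0 and CR4, vmx_cache_reg() merges two sources: bits the guest owns (and may change without a VM-exit) are read back from the VMCS, everything else comes from KVM's shadow value. A minimal sketch of that merge (helper name is illustrative):

    static unsigned long merge_cr_cache(unsigned long shadow_val,
                                        unsigned long vmcs_val,
                                        unsigned long guest_owned)
    {
            /* guest-owned bits from hardware, the rest from the shadow */
            return (shadow_val & ~guest_owned) | (vmcs_val & guest_owned);
    }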
2981 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
2987 * to VMX spec, but in reality it is not always so. Since vcpu
2997 __vmx_set_segment(vcpu, save, seg);
3000 static void enter_pmode(struct kvm_vcpu *vcpu)
3003 struct vcpu_vmx *vmx = to_vmx(vcpu);
3007 * register was written while vcpu was in a guest mode.
3009 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3010 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3011 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3012 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3013 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3014 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3018 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3028 vmx_update_exception_bitmap(vcpu);
3030 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3031 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3032 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3033 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3034 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3035 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3070 static void enter_rmode(struct kvm_vcpu *vcpu)
3073 struct vcpu_vmx *vmx = to_vmx(vcpu);
3074 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
3083 WARN_ON_ONCE(is_guest_mode(vcpu));
3085 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3086 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3087 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3088 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3089 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3090 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3091 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3108 vmx_update_exception_bitmap(vcpu);
3118 int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3120 struct vcpu_vmx *vmx = to_vmx(vcpu);
3126 vcpu->arch.efer = efer;
3133 if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3143 static void enter_lmode(struct kvm_vcpu *vcpu)
3147 vmx_segment_cache_clear(to_vmx(vcpu));
3157 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3160 static void exit_lmode(struct kvm_vcpu *vcpu)
3162 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3167 void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
3169 struct vcpu_vmx *vmx = to_vmx(vcpu);
3190 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
3192 if (is_guest_mode(vcpu))
3193 return nested_get_vpid02(vcpu);
3194 return to_vmx(vcpu)->vpid;
3197 void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
3199 struct kvm_mmu *mmu = vcpu->arch.mmu;
3207 ept_sync_context(construct_eptp(vcpu, root_hpa,
3210 vpid_sync_context(vmx_get_current_vpid(vcpu));
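vmx_flush_tlb_current() flushes only the active context: with EPT it syncs the EPTP built from the current root, otherwise it syncs by VPID, and vmx_get_current_vpid() already picks vpid02 when running a nested guest. A condensed sketch of that flow (field names assumed from the current source):

    u64 root_hpa = vcpu->arch.mmu->root.hpa;

    if (!VALID_PAGE(root_hpa))
            return;                         /* no root loaded, nothing to flush */

    if (enable_ept)
            ept_sync_context(construct_eptp(vcpu, root_hpa,
                                            vcpu->arch.mmu->root_role.level));
    else
            vpid_sync_context(vmx_get_current_vpid(vcpu));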
3213 void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3219 vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
3222 void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3231 vpid_sync_context(vmx_get_current_vpid(vcpu));
3234 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3236 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3238 if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3241 if (is_pae_paging(vcpu)) {
3249 void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3251 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3253 if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3261 kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3267 bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3269 if (is_guest_mode(vcpu))
3270 return nested_guest_cr0_valid(vcpu, cr0);
3272 if (to_vmx(vcpu)->nested.vmxon)
3273 return nested_host_cr0_valid(vcpu, cr0);
3278 void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3280 struct vcpu_vmx *vmx = to_vmx(vcpu);
3284 old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3295 enter_pmode(vcpu);
3298 enter_rmode(vcpu);
3303 vcpu->arch.cr0 = cr0;
3304 kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3307 if (vcpu->arch.efer & EFER_LME) {
3309 enter_lmode(vcpu);
3311 exit_lmode(vcpu);
3322 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3323 vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3342 } else if (!is_guest_mode(vcpu)) {
3347 tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3351 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3353 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3356 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3360 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3363 /* depends on vcpu->arch.cr0 to be set to a new value */
3364 vmx->emulation_required = vmx_emulation_required(vcpu);
3374 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3381 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3388 void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3390 struct kvm *kvm = vcpu->kvm;
3396 eptp = construct_eptp(vcpu, root_hpa, root_level);
3399 hv_track_root_tdp(vcpu, root_hpa);
3401 if (!enable_unrestricted_guest && !is_paging(vcpu))
3403 else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
3404 guest_cr3 = vcpu->arch.cr3;
3407 vmx_ept_load_pdptrs(vcpu);
3409 guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
3410 kvm_get_active_cr3_lam_bits(vcpu);
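construct_eptp() packs the EPT root into the architectural EPTP format: memory type, page-walk length, optional accessed/dirty enable, plus the root HPA; vmx_load_mmu_pgd() then writes either that EPTP or a plain guest CR3. A hedged sketch of the EPTP assembly:

    u64 eptp = VMX_EPTP_MT_WB;              /* write-back paging-structure accesses */

    eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
    if (enable_ept_ad_bits &&
        (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
            eptp |= VMX_EPTP_AD_ENABLE_BIT;
    eptp |= root_hpa;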
3417 bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3424 if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3427 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3433 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3435 unsigned long old_cr4 = kvm_read_cr4(vcpu);
3436 struct vcpu_vmx *vmx = to_vmx(vcpu);
3456 } else if (!is_guest_mode(vcpu) ||
3457 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3462 vcpu->arch.cr4 = cr4;
3463 kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3467 if (!is_paging(vcpu)) {
3486 if (!is_paging(vcpu))
3494 kvm_update_cpuid_runtime(vcpu);
3497 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3499 struct vcpu_vmx *vmx = to_vmx(vcpu);
3533 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3537 if (to_vmx(vcpu)->rmode.vm86_active) {
3538 vmx_get_segment(vcpu, &s, seg);
3541 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3544 int vmx_get_cpl(struct kvm_vcpu *vcpu)
3546 struct vcpu_vmx *vmx = to_vmx(vcpu);
3573 void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3575 struct vcpu_vmx *vmx = to_vmx(vcpu);
3604 if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3610 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3612 __vmx_set_segment(vcpu, var, seg);
3614 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
3617 void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3619 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3625 void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3631 void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3637 void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3643 void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3649 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3654 vmx_get_segment(vcpu, &var, seg);
3670 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3675 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3698 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3703 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3720 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3725 vmx_get_segment(vcpu, &var, seg);
3745 static bool tr_valid(struct kvm_vcpu *vcpu)
3749 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3763 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3767 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3781 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3785 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3786 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3797 bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3800 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3801 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3803 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3805 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3807 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3809 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3811 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3815 if (!cs_ss_rpl_check(vcpu))
3817 if (!code_segment_valid(vcpu))
3819 if (!stack_segment_valid(vcpu))
3821 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3823 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3825 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3827 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3829 if (!tr_valid(vcpu))
3831 if (!ldtr_valid(vcpu))
3963 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
3965 struct vcpu_vmx *vmx = to_vmx(vcpu);
3987 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
3993 !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
4005 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
4007 struct vcpu_vmx *vmx = to_vmx(vcpu);
4035 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
4044 struct vcpu_vmx *vmx = to_vmx(vcpu);
4048 if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu)))
4055 if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
4074 msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4083 vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
4087 vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
4088 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
4089 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
4091 vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
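The intercept helpers above flip bits in the per-vCPU 4 KiB MSR bitmap, which is split into four 1 KiB regions: read-low, read-high, write-low, write-high, where "low" covers MSRs 0x0-0x1fff and "high" covers 0xc0000000-0xc0001fff, and a set bit means the access causes a VM-exit. A hedged sketch of addressing the write bitmap (helper name is illustrative):

    static void msr_bitmap_set_write_intercept(unsigned long *bitmap, u32 msr)
    {
            if (msr <= 0x1fff)
                    __set_bit(msr, bitmap + 0x800 / sizeof(long));
            else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                    __set_bit(msr & 0x1fff, bitmap + 0xc00 / sizeof(long));
    }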
4095 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
4097 struct vcpu_vmx *vmx = to_vmx(vcpu);
4101 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4102 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4103 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4104 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4106 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4107 vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4111 bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
4113 struct vcpu_vmx *vmx = to_vmx(vcpu);
4118 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
4119 !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
4131 void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
4133 struct vcpu_vmx *vmx = to_vmx(vcpu);
4151 vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
4154 vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
4159 pt_update_intercept_for_msr(vcpu);
4162 static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4166 if (vcpu->mode == IN_GUEST_MODE) {
4193 if (vcpu != kvm_get_running_vcpu())
4194 __apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4203 kvm_vcpu_wake_up(vcpu);
4206 static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4209 struct vcpu_vmx *vmx = to_vmx(vcpu);
4211 if (is_guest_mode(vcpu) &&
4218 kvm_make_request(KVM_REQ_EVENT, vcpu);
4221 * This pairs with the smp_mb_*() after setting vcpu->mode in
4224 * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as
4228 * vcpu->requests and the load from vcpu->mode.
4233 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
4239 * Send an interrupt to the vcpu via the posted-interrupt mechanism:
4240 * 1. If the target vcpu is running (non-root mode), send a posted-interrupt
4241 * notification and the hardware will sync PIR to vIRR atomically.
4242 * 2. If the target vcpu isn't running (root mode), kick it to pick up the
4245 static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4247 struct vcpu_vmx *vmx = to_vmx(vcpu);
4250 r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4255 if (!vcpu->arch.apic->apicv_active)
4267 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
4269 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
4271 kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
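The comment and code above are the posted-interrupt handshake: record the vector in the PIR, set the outstanding-notification bit, then either let hardware sync PIR into vIRR via the notification vector (vCPU in non-root mode) or kick the vCPU so it picks the interrupt up on the next entry. A condensed sketch of the non-nested delivery path, assuming the pi_desc helpers from posted_intr.h:

    if (pi_test_and_set_pir(vector, &vmx->pi_desc))
            return 0;                       /* vector already pending in PIR */

    if (pi_test_and_set_on(&vmx->pi_desc))
            return 0;                       /* a notification is already in flight */

    /* IPI if the vCPU is in non-root mode, otherwise wake/kick it. */
    kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
    return 0;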
4278 struct kvm_vcpu *vcpu = apic->vcpu;
4280 if (vmx_deliver_posted_interrupt(vcpu, vector)) {
4282 kvm_make_request(KVM_REQ_EVENT, vcpu);
4283 kvm_vcpu_kick(vcpu);
4285 trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4365 struct kvm_vcpu *vcpu = &vmx->vcpu;
4367 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4368 ~vcpu->arch.cr4_guest_rsvd_bits;
4370 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4371 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4373 if (is_guest_mode(&vmx->vcpu))
4374 vcpu->arch.cr4_guest_owned_bits &=
4375 ~get_vmcs12(vcpu)->cr4_guest_host_mask;
4376 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4383 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4438 void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4440 struct vcpu_vmx *vmx = to_vmx(vcpu);
4442 if (is_guest_mode(vcpu)) {
4449 if (kvm_vcpu_apicv_active(vcpu)) {
4463 vmx_update_msr_bitmap_x2apic(vcpu);
4483 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4486 if (!cpu_need_tpr_shadow(&vmx->vcpu))
4502 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4505 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4518 if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4570 struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
4592 struct kvm_vcpu *vcpu = &vmx->vcpu;
4598 if (!cpu_need_virtualize_apic_accesses(vcpu))
4609 if (kvm_pause_in_guest(vmx->vcpu.kvm))
4611 if (!kvm_vcpu_apicv_active(vcpu))
4638 if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4653 guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
4654 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
4669 if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4672 if (!kvm_notify_vmexit_enabled(vcpu->kvm))
4712 struct kvm *kvm = vmx->vcpu.kvm;
4738 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4750 if (vmx_can_use_ipiv(&vmx->vcpu)) {
4784 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4791 vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4792 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4807 vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4823 if (cpu_need_tpr_shadow(&vmx->vcpu))
4825 __pa(vmx->vcpu.arch.apic->regs));
4832 static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4834 struct vcpu_vmx *vmx = to_vmx(vcpu);
4841 vcpu_setup_sgx_lepubkeyhash(vcpu);
4851 vcpu->arch.microcode_version = 0x100000000ULL;
4862 void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4864 struct vcpu_vmx *vmx = to_vmx(vcpu);
4867 __vmx_vcpu_reset(vcpu);
4875 kvm_set_cr8(vcpu, 0);
4878 kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);
4914 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4918 vmx_update_fb_clear_dis(vcpu, vmx);
4921 void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
4923 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4926 void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
4930 vmx_enable_irq_window(vcpu);
4934 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4937 void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
4939 struct vcpu_vmx *vmx = to_vmx(vcpu);
4941 int irq = vcpu->arch.interrupt.nr;
4943 trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4945 ++vcpu->stat.irq_injections;
4948 if (vcpu->arch.interrupt.soft)
4949 inc_eip = vcpu->arch.event_exit_inst_len;
4950 kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
4954 if (vcpu->arch.interrupt.soft) {
4957 vmx->vcpu.arch.event_exit_inst_len);
4962 vmx_clear_hlt(vcpu);
4965 void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4967 struct vcpu_vmx *vmx = to_vmx(vcpu);
4982 ++vcpu->stat.nmi_injections;
4986 kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
4993 vmx_clear_hlt(vcpu);
4996 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4998 struct vcpu_vmx *vmx = to_vmx(vcpu);
5010 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5012 struct vcpu_vmx *vmx = to_vmx(vcpu);
5030 bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
5032 if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5035 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5043 int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5045 if (to_vmx(vcpu)->nested.nested_run_pending)
5049 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5052 return !vmx_nmi_blocked(vcpu);
5055 bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5057 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5060 return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5065 int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5067 if (to_vmx(vcpu)->nested.nested_run_pending)
5074 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5077 return !vmx_interrupt_blocked(vcpu);
5106 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5114 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5116 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5120 return !(vcpu->guest_debug &
5135 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5143 if (kvm_emulate_instruction(vcpu, 0)) {
5144 if (vcpu->arch.halt_request) {
5145 vcpu->arch.halt_request = 0;
5146 return kvm_emulate_halt_noskip(vcpu);
5158 kvm_queue_exception(vcpu, vec);
5162 static int handle_machine_check(struct kvm_vcpu *vcpu)
5179 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
5184 return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
5185 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5188 static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5190 struct vcpu_vmx *vmx = to_vmx(vcpu);
5191 struct kvm_run *kvm_run = vcpu->run;
5197 intr_info = vmx_get_intr_info(vcpu);
5214 kvm_queue_exception(vcpu, NM_VECTOR);
5219 return handle_ud(vcpu);
5221 if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm))
5237 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
5240 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
5250 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5251 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5252 vcpu->run->internal.ndata = 4;
5253 vcpu->run->internal.data[0] = vect_info;
5254 vcpu->run->internal.data[1] = intr_info;
5255 vcpu->run->internal.data[2] = error_code;
5256 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5261 cr2 = vmx_get_exit_qual(vcpu);
5262 if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5268 kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5271 return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
5276 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5277 return handle_rmode_exception(vcpu, ex_no, error_code);
5281 dr6 = vmx_get_exit_qual(vcpu);
5282 if (!(vcpu->guest_debug &
5307 WARN_ON(!skip_emulated_instruction(vcpu));
5308 else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
5314 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
5326 vmx->vcpu.arch.event_exit_inst_len =
5329 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5333 if (vmx_guest_inject_ac(vcpu)) {
5334 kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5343 if (handle_guest_split_lock(kvm_rip_read(vcpu)))
5355 static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
5357 ++vcpu->stat.irq_exits;
5361 static int handle_triple_fault(struct kvm_vcpu *vcpu)
5363 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5364 vcpu->mmio_needed = 0;
5368 static int handle_io(struct kvm_vcpu *vcpu)
5374 exit_qualification = vmx_get_exit_qual(vcpu);
5377 ++vcpu->stat.io_exits;
5380 return kvm_emulate_instruction(vcpu, 0);
5386 return kvm_fast_pio(vcpu, size, port, in);
5389 void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5400 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5402 if (is_guest_mode(vcpu)) {
5403 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5417 if (kvm_set_cr0(vcpu, val))
5422 return kvm_set_cr0(vcpu, val);
5426 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5428 if (is_guest_mode(vcpu)) {
5429 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5435 if (kvm_set_cr4(vcpu, val))
5440 return kvm_set_cr4(vcpu, val);
5443 static int handle_desc(struct kvm_vcpu *vcpu)
5451 WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
5452 return kvm_emulate_instruction(vcpu, 0);
5455 static int handle_cr(struct kvm_vcpu *vcpu)
5463 exit_qualification = vmx_get_exit_qual(vcpu);
5468 val = kvm_register_read(vcpu, reg);
5472 err = handle_set_cr0(vcpu, val);
5473 return kvm_complete_insn_gp(vcpu, err);
5477 err = kvm_set_cr3(vcpu, val);
5478 return kvm_complete_insn_gp(vcpu, err);
5480 err = handle_set_cr4(vcpu, val);
5481 return kvm_complete_insn_gp(vcpu, err);
5483 u8 cr8_prev = kvm_get_cr8(vcpu);
5485 err = kvm_set_cr8(vcpu, cr8);
5486 ret = kvm_complete_insn_gp(vcpu, err);
5487 if (lapic_in_kernel(vcpu))
5496 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5502 KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5509 val = kvm_read_cr3(vcpu);
5510 kvm_register_write(vcpu, reg, val);
5512 return kvm_skip_emulated_instruction(vcpu);
5514 val = kvm_get_cr8(vcpu);
5515 kvm_register_write(vcpu, reg, val);
5517 return kvm_skip_emulated_instruction(vcpu);
5522 trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
5523 kvm_lmsw(vcpu, val);
5525 return kvm_skip_emulated_instruction(vcpu);
5529 vcpu->run->exit_reason = 0;
5530 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5535 static int handle_dr(struct kvm_vcpu *vcpu)
5541 exit_qualification = vmx_get_exit_qual(vcpu);
5545 if (!kvm_require_dr(vcpu, dr))
5548 if (vmx_get_cpl(vcpu) > 0)
5558 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5559 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5560 vcpu->run->debug.arch.dr7 = dr7;
5561 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5562 vcpu->run->debug.arch.exception = DB_VECTOR;
5563 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5566 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5571 if (vcpu->guest_debug == 0) {
5572 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5579 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5585 kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr));
5588 err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
5592 return kvm_complete_insn_gp(vcpu, err);
5595 void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5597 get_debugreg(vcpu->arch.db[0], 0);
5598 get_debugreg(vcpu->arch.db[1], 1);
5599 get_debugreg(vcpu->arch.db[2], 2);
5600 get_debugreg(vcpu->arch.db[3], 3);
5601 get_debugreg(vcpu->arch.dr6, 6);
5602 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5604 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5605 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
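handle_dr() and vmx_sync_dirty_debug_regs() implement lazy debug-register exiting: the guest's first DR access (with no host-side guest_debug active) turns MOV-DR exiting off and flags the registers as "won't exit"; on the next VM-exit the live values are read back and exiting is re-enabled. A condensed pairing of the two halves from the lines above:

    /* handle_dr(): first guest DR access, stop intercepting. */
    exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
    vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;

    /* vmx_sync_dirty_debug_regs(): reclaim state, intercept again. */
    get_debugreg(vcpu->arch.db[0], 0);      /* ...db[1..3] and dr6 likewise */
    vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
    vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
    exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);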
5614 void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5619 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5621 kvm_apic_update_ppr(vcpu);
5625 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5627 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5629 kvm_make_request(KVM_REQ_EVENT, vcpu);
5631 ++vcpu->stat.irq_window_exits;
5635 static int handle_invlpg(struct kvm_vcpu *vcpu)
5637 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5639 kvm_mmu_invlpg(vcpu, exit_qualification);
5640 return kvm_skip_emulated_instruction(vcpu);
5643 static int handle_apic_access(struct kvm_vcpu *vcpu)
5646 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5658 kvm_lapic_set_eoi(vcpu);
5659 return kvm_skip_emulated_instruction(vcpu);
5662 return kvm_emulate_instruction(vcpu, 0);
5665 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5667 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5671 kvm_apic_set_eoi_accelerated(vcpu, vector);
5675 static int handle_apic_write(struct kvm_vcpu *vcpu)
5677 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5688 kvm_apic_write_nodecode(vcpu, offset);
5692 static int handle_task_switch(struct kvm_vcpu *vcpu)
5694 struct vcpu_vmx *vmx = to_vmx(vcpu);
5705 exit_qualification = vmx_get_exit_qual(vcpu);
5711 vcpu->arch.nmi_injected = false;
5712 vmx_set_nmi_mask(vcpu, true);
5716 kvm_clear_interrupt_queue(vcpu);
5727 kvm_clear_exception_queue(vcpu);
5738 WARN_ON(!skip_emulated_instruction(vcpu));
5744 return kvm_task_switch(vcpu, tss_selector,
5749 static int handle_ept_violation(struct kvm_vcpu *vcpu)
5755 exit_qualification = vmx_get_exit_qual(vcpu);
5763 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5769 trace_kvm_page_fault(vcpu, gpa, exit_qualification);
5795 if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
5796 return kvm_emulate_instruction(vcpu, 0);
5798 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5801 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5805 if (vmx_check_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
5813 if (!is_guest_mode(vcpu) &&
5814 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5816 return kvm_skip_emulated_instruction(vcpu);
5819 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5822 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5824 if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
5827 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5828 ++vcpu->stat.nmi_window_exits;
5829 kvm_make_request(KVM_REQ_EVENT, vcpu);
5834 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
5836 struct vcpu_vmx *vmx = to_vmx(vcpu);
5839 (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
5842 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5844 struct vcpu_vmx *vmx = to_vmx(vcpu);
5852 if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
5853 return handle_interrupt_window(&vmx->vcpu);
5855 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5858 if (!kvm_emulate_instruction(vcpu, 0))
5861 if (vmx_emulation_required_with_pending_exception(vcpu)) {
5862 kvm_prepare_emulation_failure_exit(vcpu);
5866 if (vcpu->arch.halt_request) {
5867 vcpu->arch.halt_request = 0;
5868 return kvm_emulate_halt_noskip(vcpu);
5883 int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
5885 if (vmx_emulation_required_with_pending_exception(vcpu)) {
5886 kvm_prepare_emulation_failure_exit(vcpu);
5893 static void grow_ple_window(struct kvm_vcpu *vcpu)
5895 struct vcpu_vmx *vmx = to_vmx(vcpu);
5904 trace_kvm_ple_window_update(vcpu->vcpu_id,
5909 static void shrink_ple_window(struct kvm_vcpu *vcpu)
5911 struct vcpu_vmx *vmx = to_vmx(vcpu);
5920 trace_kvm_ple_window_update(vcpu->vcpu_id,
5926 * Indicate that the vcpu is busy-waiting on a spinlock. We do not enable the PAUSE
5929 static int handle_pause(struct kvm_vcpu *vcpu)
5931 if (!kvm_pause_in_guest(vcpu->kvm))
5932 grow_ple_window(vcpu);
5938 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
5940 kvm_vcpu_on_spin(vcpu, true);
5941 return kvm_skip_emulated_instruction(vcpu);
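grow_ple_window() (on a PAUSE exit) and shrink_ple_window() (on sched-in, see vmx_sched_in further down) keep the per-vCPU PAUSE-loop-exiting window between ple_window and ple_window_max; with the defaults described at lines 207/211 the window doubles on every PLE exit and is reset to ple_window when shrunk. A hedged sketch of the arithmetic (helper names are illustrative):

    static unsigned int grow_window(unsigned int old, unsigned int base,
                                    unsigned int modifier, unsigned int cap)
    {
            u64 val = old;

            if (modifier < 1)
                    return base;
            val = (modifier < base) ? val * modifier : val + modifier;
            return min(val, (u64)cap);
    }

    static unsigned int shrink_window(unsigned int old, unsigned int base,
                                      unsigned int modifier)
    {
            if (modifier < 1)
                    return base;            /* default: reset to ple_window */
            old = (modifier < base) ? old / modifier : old - modifier;
            return max(old, base);
    }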
5944 static int handle_monitor_trap(struct kvm_vcpu *vcpu)
5949 static int handle_invpcid(struct kvm_vcpu *vcpu)
5960 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
5961 kvm_queue_exception(vcpu, UD_VECTOR);
5967 type = kvm_register_read(vcpu, gpr_index);
5972 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5977 return kvm_handle_invpcid(vcpu, type, gva);
5980 static int handle_pml_full(struct kvm_vcpu *vcpu)
5984 trace_kvm_pml_full(vcpu->vcpu_id);
5986 exit_qualification = vmx_get_exit_qual(vcpu);
5992 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
6005 static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu,
6008 struct vcpu_vmx *vmx = to_vmx(vcpu);
6029 if (is_guest_mode(vcpu))
6032 kvm_lapic_expired_hv_timer(vcpu);
6036 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
6043 WARN_ON_ONCE(!is_guest_mode(vcpu));
6044 kvm_lapic_expired_hv_timer(vcpu);
6052 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
6054 kvm_queue_exception(vcpu, UD_VECTOR);
6059 static int handle_encls(struct kvm_vcpu *vcpu)
6066 kvm_queue_exception(vcpu, UD_VECTOR);
6071 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
6078 to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
6082 static int handle_notify(struct kvm_vcpu *vcpu)
6084 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
6087 ++vcpu->stat.notify_window_exits;
6097 if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6099 vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6100 vcpu->run->notify.flags = context_invalid ?
6113 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6171 void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
6174 struct vcpu_vmx *vmx = to_vmx(vcpu);
6177 *info1 = vmx_get_exit_qual(vcpu);
6180 *intr_info = vmx_get_intr_info(vcpu);
6200 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
6202 struct vcpu_vmx *vmx = to_vmx(vcpu);
6224 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
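vmx_flush_pml_buffer() drains the 512-entry Page-Modification Log: the PML index in the VMCS points at the next free slot (hardware fills the buffer from entry 511 downward), so every entry after the index holds the GPA of a page the guest dirtied since the last flush. A condensed sketch of the walk:

    u16 idx = vmcs_read16(GUEST_PML_INDEX);
    u64 *pml_buf = page_address(vmx->pml_pg);

    if (idx == PML_ENTITY_NUM - 1)
            return;                         /* buffer is empty */

    /* The index points at the next free slot; consumed entries follow it. */
    idx = (idx >= PML_ENTITY_NUM) ? 0 : idx + 1;
    for (; idx < PML_ENTITY_NUM; idx++)
            kvm_vcpu_mark_page_dirty(vcpu, pml_buf[idx] >> PAGE_SHIFT);

    /* Reset the index so hardware starts filling from the top again. */
    vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);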
6257 void dump_vmcs(struct kvm_vcpu *vcpu)
6259 struct vcpu_vmx *vmx = to_vmx(vcpu);
6288 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6327 vcpu->arch.efer | (EFER_LMA | EFER_LME));
6330 vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6454 static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6456 struct vcpu_vmx *vmx = to_vmx(vcpu);
6469 if (enable_pml && !is_guest_mode(vcpu))
6470 vmx_flush_pml_buffer(vcpu);
6478 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6481 if (is_guest_mode(vcpu)) {
6500 nested_mark_vmcs12_pages_dirty(vcpu);
6514 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
6518 if (nested_vmx_reflect_vmexit(vcpu))
6524 return handle_invalid_guest_state(vcpu);
6527 dump_vmcs(vcpu);
6528 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6529 vcpu->run->fail_entry.hardware_entry_failure_reason
6531 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6536 dump_vmcs(vcpu);
6537 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6538 vcpu->run->fail_entry.hardware_entry_failure_reason
6540 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6560 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6561 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6562 vcpu->run->internal.data[0] = vectoring_info;
6563 vcpu->run->internal.data[1] = exit_reason.full;
6564 vcpu->run->internal.data[2] = vmx_get_exit_qual(vcpu);
6566 vcpu->run->internal.data[ndata++] =
6569 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6570 vcpu->run->internal.ndata = ndata;
6576 if (!vmx_interrupt_blocked(vcpu)) {
6579 vcpu->arch.nmi_pending) {
6588 __func__, vcpu->vcpu_id);
6600 return kvm_emulate_wrmsr(vcpu);
6602 return handle_preemption_timer(vcpu);
6604 return handle_interrupt_window(vcpu);
6606 return handle_external_interrupt(vcpu);
6608 return kvm_emulate_halt(vcpu);
6610 return handle_ept_misconfig(vcpu);
6618 return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6621 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6623 dump_vmcs(vcpu);
6624 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6625 vcpu->run->internal.suberror =
6627 vcpu->run->internal.ndata = 2;
6628 vcpu->run->internal.data[0] = exit_reason.full;
6629 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6633 int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6635 int ret = __vmx_handle_exit(vcpu, exit_fastpath);
6641 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
6643 vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6645 vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6661 static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
6673 * Clear the per-vcpu flush bit; it gets set again
6677 flush_l1d = vcpu->arch.l1tf_flush_l1d;
6678 vcpu->arch.l1tf_flush_l1d = false;
6691 vcpu->stat.l1d_flush++;
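vmx_l1d_flush() is the L1TF mitigation: in the "conditional" mode it flushes only when the per-vCPU or per-CPU flush hint was set since the last entry, and it prefers the dedicated flush MSR over the software fill sequence. A condensed sketch of that decision (the software-fill fallback is omitted):

    if (static_branch_likely(&vmx_l1d_flush_cond)) {
            bool flush = vcpu->arch.l1tf_flush_l1d;

            /* Consume both hints; later events re-arm them. */
            vcpu->arch.l1tf_flush_l1d = false;
            flush |= kvm_get_cpu_l1tf_flush_l1d();
            kvm_clear_cpu_l1tf_flush_l1d();
            if (!flush)
                    return;
    }

    vcpu->stat.l1d_flush++;

    if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
            native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
            return;
    }
    /* ...otherwise fall through to the software fill of the L1D. */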
6721 void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6723 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6726 if (is_guest_mode(vcpu) &&
6731 if (is_guest_mode(vcpu))
6732 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6737 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6739 struct vcpu_vmx *vmx = to_vmx(vcpu);
6742 if (!lapic_in_kernel(vcpu))
6750 if (is_guest_mode(vcpu)) {
6759 switch (kvm_get_apic_mode(vcpu)) {
6769 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6777 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6788 vmx_update_msr_bitmap_x2apic(vcpu);
6791 void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6794 struct kvm *kvm = vcpu->kvm;
6801 if (is_guest_mode(vcpu)) {
6802 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6806 if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6838 read_lock(&vcpu->kvm->mmu_lock);
6840 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6841 read_unlock(&vcpu->kvm->mmu_lock);
6846 read_unlock(&vcpu->kvm->mmu_lock);
6894 void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6904 if (!is_guest_mode(vcpu))
6908 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6910 struct vcpu_vmx *vmx = to_vmx(vcpu);
6914 if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
6925 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6927 max_irr = kvm_lapic_find_highest_irr(vcpu);
6946 if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
6949 kvm_make_request(KVM_REQ_EVENT, vcpu);
6954 void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6956 if (!kvm_vcpu_apicv_active(vcpu))
6965 void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
6967 struct vcpu_vmx *vmx = to_vmx(vcpu);
6976 static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
6992 if (vcpu->arch.guest_fpu.fpstate->xfd)
6993 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
6996 static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
7000 vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
7003 handle_nm_fault_irqoff(vcpu);
7009 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
7014 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
7018 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
7023 kvm_after_interrupt(vcpu);
7025 vcpu->arch.at_instruction_boundary = true;
7028 void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
7030 struct vcpu_vmx *vmx = to_vmx(vcpu);
7036 handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
7038 handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
7080 exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7107 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7118 vcpu->arch.nmi_injected = false;
7119 kvm_clear_exception_queue(vcpu);
7120 kvm_clear_interrupt_queue(vcpu);
7125 kvm_make_request(KVM_REQ_EVENT, vcpu);
7132 vcpu->arch.nmi_injected = true;
7138 vmx_set_nmi_mask(vcpu, false);
7141 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7146 kvm_requeue_exception_e(vcpu, vector, err);
7148 kvm_requeue_exception(vcpu, vector);
7151 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7154 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7163 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7168 void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7170 __vmx_complete_interrupts(vcpu,
7182 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7201 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7203 struct vcpu_vmx *vmx = to_vmx(vcpu);
7260 static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
7267 if (is_guest_mode(vcpu) &&
7268 to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_PREEMPTION_TIMER)
7271 switch (to_vmx(vcpu)->exit_reason.basic) {
7273 return handle_fastpath_set_msr_irqoff(vcpu);
7275 return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
7281 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
7284 struct vcpu_vmx *vmx = to_vmx(vcpu);
7295 vmx_l1d_flush(vcpu);
7297 kvm_arch_has_assigned_device(vcpu->kvm))
7302 if (vcpu->arch.cr2 != native_read_cr2())
7303 native_write_cr2(vcpu->arch.cr2);
7305 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7308 vcpu->arch.cr2 = native_read_cr2();
7309 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
7325 is_nmi(vmx_get_intr_info(vcpu))) {
7326 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
7331 kvm_after_interrupt(vcpu);
7338 fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
7340 struct vcpu_vmx *vmx = to_vmx(vcpu);
7343 /* Record the guest's net vcpu time for enforced NMI injections. */
7358 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
7360 kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
7365 trace_kvm_entry(vcpu, force_immediate_exit);
7378 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
7379 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7380 if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
7381 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7382 vcpu->arch.regs_dirty = 0;
7404 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
7405 set_debugreg(vcpu->arch.dr6, 6);
7412 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7413 vmx_set_interrupt_shadow(vcpu, 0);
7415 kvm_load_guest_xsave_state(vcpu);
7420 if (intel_pmu_lbr_is_enabled(vcpu))
7421 vmx_passthrough_lbr_msrs(vcpu);
7424 vmx_update_hv_timer(vcpu, force_immediate_exit);
7426 smp_send_reschedule(vcpu->cpu);
7428 kvm_wait_lapic_expire(vcpu);
7431 vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7438 current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
7460 kvm_load_host_xsave_state(vcpu);
7462 if (is_guest_mode(vcpu)) {
7469 ++vcpu->stat.nested_run;
7480 trace_kvm_exit(vcpu, KVM_ISA_VMX);
7490 return vmx_exit_handlers_fastpath(vcpu, force_immediate_exit);
7493 void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7495 struct vcpu_vmx *vmx = to_vmx(vcpu);
7500 nested_vmx_free_vcpu(vcpu);
7505 int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7511 BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
7512 vmx = to_vmx(vcpu);
7522 * of creating the vcpu, therefore we can simplify PML logic (by
7566 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
7568 vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
7569 vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
7570 vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
7572 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
7573 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
7574 vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
7575 if (kvm_cstate_in_guest(vcpu->kvm)) {
7576 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
7577 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
7578 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
7579 vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
7584 if (cpu_need_virtualize_apic_accesses(vcpu)) {
7585 err = kvm_alloc_apic_access_page(vcpu->kvm);
7591 err = init_rmode_identity_map(vcpu->kvm);
7610 if (vmx_can_use_ipiv(vcpu))
7611 WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
7659 u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
7682 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
7685 if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
7686 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
7693 return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
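vmx_get_mt_mask() picks the EPT memory type for a guest page: MMIO is always UC, and without any non-coherent DMA device the guest is simply forced to WB with the ignore-PAT bit set; only otherwise do CR0.CD and the guest MTRRs come into play. A condensed sketch built from the fragments above:

    if (is_mmio)
            return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

    if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
            return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;

    if (kvm_read_cr0_bits(vcpu, X86_CR0_CD))
            return kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) ?
                   MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT :
                   (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;

    return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;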
7719 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7721 struct vcpu_vmx *vmx = to_vmx(vcpu);
7732 entry = kvm_find_cpuid_entry(vcpu, 0x1);
7748 entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
7756 entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
7762 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7764 struct vcpu_vmx *vmx = to_vmx(vcpu);
7769 best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
7831 void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7833 struct vcpu_vmx *vmx = to_vmx(vcpu);
7841 guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
7842 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
7844 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
7845 kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
7853 if (guest_can_use(vcpu, X86_FEATURE_VMX))
7862 if (guest_can_use(vcpu, X86_FEATURE_VMX))
7863 nested_vmx_cr_fixed1_bits_update(vcpu);
7866 guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
7867 update_intel_pt_cfg(vcpu);
7873 bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
7879 vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
7880 !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
7883 vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
7884 !guest_has_pred_cmd_msr(vcpu));
7887 vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
7888 !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
7892 vmx_write_encls_bitmap(vcpu, NULL);
7893 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
7898 if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
7906 vmx_update_exception_bitmap(vcpu);
8012 static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
8015 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8040 intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
8046 int vmx_check_intercept(struct kvm_vcpu *vcpu,
8051 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
8071 return vmx_check_intercept_io(vcpu, info);
8129 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
8134 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8136 vmx = to_vmx(vcpu);
8138 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
8140 lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
8149 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
8152 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8169 void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8171 to_vmx(vcpu)->hv_deadline_tsc = -1;
8175 void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
8177 if (!kvm_pause_in_guest(vcpu->kvm))
8178 shrink_ple_window(vcpu);
8181 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
8183 struct vcpu_vmx *vmx = to_vmx(vcpu);
8188 if (is_guest_mode(vcpu)) {
8198 if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8204 void vmx_setup_mce(struct kvm_vcpu *vcpu)
8206 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8207 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
8210 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
8215 int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8218 if (to_vmx(vcpu)->nested.nested_run_pending)
8220 return !is_smm(vcpu);
8223 int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8225 struct vcpu_vmx *vmx = to_vmx(vcpu);
8234 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8236 nested_vmx_vmexit(vcpu, -1, 0, 0);
8240 vmx_clear_hlt(vcpu);
8244 int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8246 struct vcpu_vmx *vmx = to_vmx(vcpu);
8255 ret = nested_vmx_enter_non_root_mode(vcpu, false);
8265 void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8271 bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
8273 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
8276 void vmx_migrate_timers(struct kvm_vcpu *vcpu)
8278 if (is_guest_mode(vcpu)) {
8279 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
8310 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
8318 if (!is_64_bit_mode(vcpu))
8326 cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
8333 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
8336 lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
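vmx_get_untagged_addr() strips Linear Address Masking metadata by sign-extending from bit 47 (LAM48) or bit 56 (LAM57) while preserving bit 63, so untagging never turns a user address into a supervisor address or vice versa. A hedged sketch of the final step, with a worked example in the comment:

    /*
     * Example (LAM48, lam_bit = 47): a user pointer carrying metadata in
     * bits 48-62, e.g. 0x00ab0000deadbeef, untags to 0x00000000deadbeef;
     * bit 63 is carried over from the raw address unchanged.
     */
    return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));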
8349 struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
8352 if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
8355 kvm_make_request(KVM_REQ_PMI, vcpu);
8357 (unsigned long *)&vcpu->arch.pmu.global_status);