Lines Matching refs:nested

13 #include "nested.h"
183 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
196 if (vmx->nested.current_vmptr == INVALID_GPA &&
207 pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
224 vmx->nested.need_vmcs12_to_shadow_sync = false;
234 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
235 vmx->nested.hv_evmcs = NULL;
238 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
241 hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
242 hv_vcpu->nested.vm_id = 0;
243 hv_vcpu->nested.vp_id = 0;
260 * vmx->nested.hv_evmcs but this shouldn't be a problem.
266 if (nested_vmx_evmcs(vmx) && vmptr == vmx->nested.hv_evmcs_vmptr)
320 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
330 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
335 vmx->nested.vmxon = false;
336 vmx->nested.smm.vmxon = false;
337 vmx->nested.vmxon_ptr = INVALID_GPA;
338 free_vpid(vmx->nested.vpid02);
339 vmx->nested.posted_intr_nv = -1;
340 vmx->nested.current_vmptr = INVALID_GPA;
347 kfree(vmx->nested.cached_vmcs12);
348 vmx->nested.cached_vmcs12 = NULL;
349 kfree(vmx->nested.cached_shadow_vmcs12);
350 vmx->nested.cached_shadow_vmcs12 = NULL;
356 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
357 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
358 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
359 vmx->nested.pi_desc = NULL;
365 free_loaded_vmcs(&vmx->nested.vmcs02);
415 if (vmx->nested.pml_full) {
417 vmx->nested.pml_full = false;
443 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
444 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
603 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
604 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
619 if (!vmx->nested.force_msr_bitmap_recalc) {
694 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
696 vmx->nested.force_msr_bitmap_recalc = false;
705 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
724 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
740 * In nested virtualization, check if L1 has set
923 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
924 vmx->nested.msrs.misc_high);
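KVM stores each VMX capability MSR as separate low/high 32-bit halves in struct nested_vmx_msrs; vmx_control_msr() simply recombines them into the 64-bit MSR image reported to L1. A minimal sketch of that packing (plain C, nothing KVM-specific assumed):

#include <stdint.h>

/* Recombine the halves tracked separately: low = bits 31:0, high = bits 63:32. */
static uint64_t control_msr(uint32_t low, uint32_t high)
{
        return (uint64_t)low | ((uint64_t)high << 32);
}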
930 * Load guest's/host's msr at nested entry/exit.
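Per the Intel SDM, the VM-entry/VM-exit MSR-load areas this path walks are arrays of 16-byte entries, each pairing an MSR index with a 64-bit value. A hedged, userspace-style sketch of iterating such a list; write_msr() is a hypothetical stand-in for the real WRMSR emulation and its error handling:

#include <stdint.h>

struct msr_load_entry {        /* 16-byte layout defined by the SDM */
        uint32_t index;
        uint32_t reserved;     /* must be zero */
        uint64_t value;
};

/* Returns 0 on success, or the 1-based index of the failing entry, matching
 * how MSR-load failures are reported on VM entry. */
static uint32_t load_msr_list(const struct msr_load_entry *e, uint32_t count,
                              int (*write_msr)(uint32_t idx, uint64_t val))
{
        for (uint32_t i = 0; i < count; i++) {
                if (e[i].reserved || write_msr(e[i].index, e[i].value))
                        return i + 1;
        }
        return 0;
}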
1110 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1157 * while L2 entries are tagged with vmx->nested.vpid02).
1164 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1183 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1205 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1206 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1236 u64 vmx_basic = vmcs_config.nested.basic;
1255 vmx->nested.msrs.basic = data;
1294 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1306 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1320 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1321 vmcs_config.nested.misc_high);
1326 if ((vmx->nested.msrs.pinbased_ctls_high &
1341 vmx->nested.msrs.misc_low = data;
1342 vmx->nested.msrs.misc_high = data >> 32;
1349 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1350 vmcs_config.nested.vpid_caps);
1356 vmx->nested.msrs.ept_caps = data;
1357 vmx->nested.msrs.vpid_caps = data >> 32;
1375 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
1384 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1401 if (vmx->nested.vmxon)
1442 vmx->nested.msrs.vmcs_enum = data;
1445 if (data & ~vmcs_config.nested.vmfunc_controls)
1447 vmx->nested.msrs.vmfunc_controls = data;
1607 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1617 hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
1618 hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
1619 hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
1855 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
2031 * This is the equivalent of the nested hypervisor executing the vmptrld
2051 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2052 vmx->nested.current_vmptr = INVALID_GPA;
2057 &vmx->nested.hv_evmcs_map))
2060 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2084 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2085 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2090 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2112 vmx->nested.hv_evmcs->hv_clean_fields &=
2115 vmx->nested.force_msr_bitmap_recalc = true;
2133 vmx->nested.need_vmcs12_to_shadow_sync = false;
2139 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2141 vmx->nested.preemption_timer_expired = true;
2156 if (!vmx->nested.has_preemption_timer_deadline) {
2157 vmx->nested.preemption_timer_deadline =
2159 vmx->nested.has_preemption_timer_deadline = true;
2161 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2174 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2184 hrtimer_start(&vmx->nested.preemption_timer,
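The VMX-preemption timer counts down at the TSC rate divided by 2^N, where N is bits 4:0 of IA32_VMX_MISC, so emulating it with an hrtimer means converting the remaining ticks into nanoseconds. A rough sketch of that conversion, ignoring the TSC scaling and offset handling the real code needs (tsc_khz and the rate field are the only inputs):

#include <stdint.h>

/* rate = IA32_VMX_MISC[4:0]: the timer ticks once every 2^rate TSC cycles. */
static uint64_t preemption_ticks_to_ns(uint64_t ticks, uint32_t rate,
                                       uint64_t tsc_khz)
{
        uint64_t cycles = ticks << rate;        /* TSC cycles until expiry */

        return cycles * 1000000ULL / tsc_khz;   /* kHz -> ns */
}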
2191 if (vmx->nested.nested_run_pending &&
2210 if (vmx->nested.vmcs02_initialized)
2212 vmx->nested.vmcs02_initialized = true;
2231 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2269 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2270 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2282 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx))
2293 vmx->nested.pi_pending = false;
2295 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2309 vmx->nested.l1_tpr_threshold = -1;
2425 if (vmx->nested.nested_run_pending) {
2506 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2560 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2578 if (vmx->nested.dirty_vmcs12 || nested_vmx_is_evmptr12_valid(vmx)) {
2580 vmx->nested.dirty_vmcs12 = false;
2586 if (vmx->nested.nested_run_pending &&
2592 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
2594 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2596 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
2607 if (vmx->nested.nested_run_pending &&
2654 * loading nested state after migration, it is possible to
2670 * on nested VM-Exit, which can occur without actually running L2 and
2730 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2734 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2744 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2748 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2761 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2777 vmx->nested.msrs.pinbased_ctls_low,
2778 vmx->nested.msrs.pinbased_ctls_high)) ||
2780 vmx->nested.msrs.procbased_ctls_low,
2781 vmx->nested.msrs.procbased_ctls_high)))
2786 vmx->nested.msrs.secondary_ctls_low,
2787 vmx->nested.msrs.secondary_ctls_high)))
2814 ~vmx->nested.msrs.vmfunc_controls))
2836 vmx->nested.msrs.exit_ctls_low,
2837 vmx->nested.msrs.exit_ctls_high)) ||
2853 vmx->nested.msrs.entry_ctls_low,
2854 vmx->nested.msrs.entry_ctls_high)))
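Each VMX control capability MSR reports the allowed 0-settings in bits 31:0 (bits that must be 1) and the allowed 1-settings in bits 63:32 (the only bits that may be 1); a control word from vmcs12 is legal only if it honors both halves, which is what the vmx_control_verify() calls above reduce to. A minimal sketch of that check:

#include <stdint.h>
#include <stdbool.h>

/* low  = allowed 0-settings: these bits must be set in the control.
 * high = allowed 1-settings: only these bits may be set in the control. */
static bool control_valid(uint32_t control, uint32_t low, uint32_t high)
{
        return (control & low) == low && (control & ~high) == 0;
}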
3019 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
3104 if (to_vmx(vcpu)->nested.nested_run_pending &&
3213 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
3225 vmx->nested.need_vmcs12_to_shadow_sync = true;
3242 * the guest CR3 might be restored prior to setting the nested
3251 map = &vmx->nested.apic_access_page_map;
3267 map = &vmx->nested.virtual_apic_map;
3293 map = &vmx->nested.pi_desc_map;
3296 vmx->nested.pi_desc =
3308 vmx->nested.pi_desc = NULL;
3356 if (WARN_ON_ONCE(vmx->nested.pml_full))
3360 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3368 vmx->nested.pml_full = true;
3393 if (!to_vmx(vcpu)->nested.vmxon) {
3441 vmx->nested.current_vmptr,
3459 if (!vmx->nested.nested_run_pending ||
3461 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3463 (!vmx->nested.nested_run_pending ||
3465 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3469 * nested early checks are disabled. In the event of a "late" VM-Fail,
3475 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3478 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3486 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3551 vmx->nested.preemption_timer_expired = false;
3584 vmx->nested.need_vmcs12_to_shadow_sync = true;
3589 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3590 * for running an L2 nested guest.
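Architecturally, VMLAUNCH is only legal for a VMCS whose launch state is clear and VMRESUME only for one that has been launched, so the entry path has to validate that before building vmcs02. A sketch of just that check, with launch_state modeled as the 0/1 flag kept in the software vmcs12 image:

#include <stdbool.h>

/* SDM VM-instruction error numbers 4 and 5 cover these two cases. */
#define ERR_VMLAUNCH_NONCLEAR_VMCS      4
#define ERR_VMRESUME_NONLAUNCHED_VMCS   5

/* launch == true for VMLAUNCH, false for VMRESUME.
 * Returns 0 if the launch state matches the instruction, else the error number. */
static int check_launch_state(bool launch, int launch_state)
{
        if (launch && launch_state)
                return ERR_VMLAUNCH_NONCLEAR_VMCS;
        if (!launch && !launch_state)
                return ERR_VMRESUME_NONLAUNCHED_VMCS;
        return 0;
}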
3615 vmx->nested.current_vmptr == INVALID_GPA))
3640 * The nested entry process starts with enforcing various prerequisites
3668 * the nested entry.
3670 vmx->nested.nested_run_pending = 1;
3671 vmx->nested.has_preemption_timer_deadline = false;
3678 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3679 vmx->nested.pi_pending = true;
3681 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3684 /* Hide L1D cache contents from the nested guest. */
3710 vmx->nested.nested_run_pending = 0;
3715 vmx->nested.nested_run_pending = 0;
3725 vmx->nested.nested_run_pending = 0;
3735 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3866 if (!vmx->nested.pi_pending)
3869 if (!vmx->nested.pi_desc)
3872 vmx->nested.pi_pending = false;
3874 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3877 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3879 vapic_page = vmx->nested.virtual_apic_map.hva;
3883 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
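The posted-interrupt descriptor carries a 256-bit Posted-Interrupt Request bitmap (one bit per vector); delivery means finding the highest set vector and merging the bitmap into the virtual APIC's IRR. A self-contained sketch of the scan half, with the PIR modeled as four 64-bit words (the IRR merge is left out):

#include <stdint.h>

/* Return the highest pending vector in a 256-bit PIR, or -1 if none is set. */
static int pir_highest_vector(const uint64_t pir[4])
{
        for (int word = 3; word >= 0; word--) {
                if (!pir[word])
                        continue;
                /* Highest set bit within this non-zero 64-bit word. */
                return word * 64 + (63 - __builtin_clzll(pir[word]));
        }
        return -1;
}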
3930 * hardware and avoid inducing failure on nested VM-Entry if L1
4006 to_vmx(vcpu)->nested.preemption_timer_expired;
4012 to_vmx(vcpu)->nested.mtf_pending;
4103 * Only a pending nested run blocks a pending exception. If there is a
4107 bool block_nested_exceptions = vmx->nested.nested_run_pending;
4127 vmx->nested.mtf_pending = false;
4171 if (vmx->nested.mtf_pending) {
4240 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
4340 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4349 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4356 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4362 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4379 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4404 !vmx->nested.nested_run_pending)
4409 * In some cases (usually, nested EPT), L2 is allowed to change its
4443 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4501 * A part of what we need to do when the nested L2 guest exits and we want to
4504 * This function is to be called not only on normal nested exit, but also on
4505 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4672 * nested VMENTER (not worth adding a variable in nested_vmx).
4710 * of VMFail), leaving the nested VM's MSRs in the software model
4713 * MSR that was (prematurely) loaded from the nested VMEntry load
4763 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4774 vmx->nested.mtf_pending = false;
4777 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4805 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4873 if (vmx->nested.l1_tpr_threshold != -1)
4874 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4876 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4877 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4881 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4882 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4887 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
4888 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4889 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4890 vmx->nested.pi_desc = NULL;
4892 if (vmx->nested.reload_vmcs01_apic_access_page) {
4893 vmx->nested.reload_vmcs01_apic_access_page = false;
4897 if (vmx->nested.update_vmcs01_apicv_status) {
4898 vmx->nested.update_vmcs01_apicv_status = false;
4904 vmx->nested.need_vmcs12_to_shadow_sync = true;
5123 * when L1 executes VMXOFF or the vCPU is forced out of nested
5144 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
5148 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5149 if (!vmx->nested.cached_vmcs12)
5152 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
5153 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5154 if (!vmx->nested.cached_shadow_vmcs12)
5160 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
5162 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
5164 vmx->nested.vpid02 = allocate_vpid();
5166 vmx->nested.vmcs02_initialized = false;
5167 vmx->nested.vmxon = true;
5177 kfree(vmx->nested.cached_shadow_vmcs12);
5180 kfree(vmx->nested.cached_vmcs12);
5183 free_loaded_vmcs(&vmx->nested.vmcs02);
5235 if (vmx->nested.vmxon)
5263 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
5273 vmx->nested.vmxon_ptr = vmptr;
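Bit 48 of IA32_VMX_BASIC, when set, would restrict VMXON/VMCS pointers to 32-bit physical addresses; since KVM never reports it to L1, a pointer only has to be page aligned and fit within the guest's physical-address width. A hedged sketch of that pointer check (maxphyaddr is the guest's reported physical-address width):

#include <stdint.h>
#include <stdbool.h>

/* Validate a VMXON/VMCS pointer supplied by L1 against the IA32_VMX_BASIC
 * value reported to it. */
static bool vmx_pointer_valid(uint64_t ptr, uint64_t basic, int maxphyaddr)
{
        uint64_t limit = (basic & (1ULL << 48)) ? (1ULL << 32)
                                                : (1ULL << maxphyaddr);

        return (ptr & 0xfff) == 0 && ptr < limit;   /* 4 KiB aligned, in range */
}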
5285 if (vmx->nested.current_vmptr == INVALID_GPA)
5296 vmx->nested.posted_intr_nv = -1;
5300 vmx->nested.current_vmptr >> PAGE_SHIFT,
5301 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5305 vmx->nested.current_vmptr = INVALID_GPA;
5339 if (vmptr == vmx->nested.vmxon_ptr)
5343 if (vmptr == vmx->nested.current_vmptr)
5402 if (vmx->nested.current_vmptr == INVALID_GPA ||
5512 if (vmx->nested.current_vmptr == INVALID_GPA ||
5584 vmx->nested.dirty_vmcs12 = true;
5592 vmx->nested.current_vmptr = vmptr;
5597 vmx->nested.need_vmcs12_to_shadow_sync = true;
5599 vmx->nested.dirty_vmcs12 = true;
5600 vmx->nested.force_msr_bitmap_recalc = true;
5619 if (vmptr == vmx->nested.vmxon_ptr)
5626 if (vmx->nested.current_vmptr != vmptr) {
5627 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5661 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5678 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5715 if (!(vmx->nested.msrs.secondary_ctls_high &
5717 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5729 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
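The shift-and-mask above turns the EPT capability MSR into a small bitmap of supported INVEPT types: capability bits 25 (single-context) and 26 (all-context) land on bits 1 and 2 after the shift by VMX_EPT_EXTENT_SHIFT, matching INVEPT types 1 and 2. A sketch of validating a requested type against that bitmap, assuming those SDM bit positions:

#include <stdint.h>
#include <stdbool.h>

/* INVEPT type 1 = single-context, 2 = all-context. */
static bool invept_type_supported(uint64_t ept_caps, uint64_t type)
{
        uint64_t types = (ept_caps >> 24) & 6;   /* caps bits 25/26 -> types 1/2 */

        return type < 32 && (types & (1ULL << type));
}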
5796 if (!(vmx->nested.msrs.secondary_ctls_high &
5798 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5810 types = (vmx->nested.msrs.vpid_caps &
5916 * VMFUNC for nested VMs, but not for L1.
5950 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
6218 * L0 always deals with the EPT violation. If nested EPT is
6219 * used, and the nested mmu code discovers that the address is
6400 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6403 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6470 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6471 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6472 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6487 if (vmx->nested.smm.vmxon)
6490 if (vmx->nested.smm.guest_mode)
6496 if (vmx->nested.nested_run_pending)
6499 if (vmx->nested.mtf_pending)
6503 vmx->nested.has_preemption_timer_deadline) {
6507 vmx->nested.preemption_timer_deadline;
6533 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6571 to_vmx(vcpu)->nested.nested_run_pending = 0;
6645 !vmx->nested.enlightened_vmcs_enabled))
6653 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6683 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
6691 vmx->nested.smm.vmxon = true;
6692 vmx->nested.vmxon = false;
6695 vmx->nested.smm.guest_mode = true;
6708 vmx->nested.nested_run_pending =
6711 vmx->nested.mtf_pending =
6736 vmx->nested.has_preemption_timer_deadline = false;
6738 vmx->nested.has_preemption_timer_deadline = true;
6739 vmx->nested.preemption_timer_deadline =
6748 vmx->nested.dirty_vmcs12 = true;
6749 vmx->nested.force_msr_bitmap_recalc = true;
6754 if (vmx->nested.mtf_pending)
6760 vmx->nested.nested_run_pending = 0;
6932 /* nested EPT: emulate EPT also to L1 */
7037 * returned for the various VMX controls MSRs when nested VMX is enabled.
7039 * valid during nested entry from L1 to L2.
7047 struct nested_vmx_msrs *msrs = &vmcs_conf->nested;