Lines Matching defs:svm

31 #include "svm.h"
39 struct vcpu_svm *svm = to_svm(vcpu);
40 struct vmcb *vmcb = svm->vmcb;
56 nested_svm_vmexit(svm);
61 struct vcpu_svm *svm = to_svm(vcpu);
62 u64 cr3 = svm->nested.ctl.nested_cr3;
75 struct vcpu_svm *svm = to_svm(vcpu);
77 return svm->nested.ctl.nested_cr3;
82 struct vcpu_svm *svm = to_svm(vcpu);
93 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
94 svm->vmcb01.ptr->save.efer,
95 svm->nested.ctl.nested_cr3);
108 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
110 if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
113 if (!nested_npt_enabled(svm))
116 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
122 void recalc_intercepts(struct vcpu_svm *svm)
128 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
130 if (!is_guest_mode(&svm->vcpu))
133 c = &svm->vmcb->control;
134 h = &svm->vmcb01.ptr->control;
135 g = &svm->nested.ctl;
152 if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
160 if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
170 if (nested_vmcb_needs_vls_intercept(svm)) {
188 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
201 if (!svm->nested.force_msr_bitmap_recalc) {
202 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
204 if (kvm_hv_hypercall_enabled(&svm->vcpu) &&
206 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
211 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
227 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
229 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
232 svm->nested.msrpm[p] = svm->msrpm[p] | value;
235 svm->nested.force_msr_bitmap_recalc = false;
240 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
321 struct vcpu_svm *svm = to_svm(vcpu);
322 struct vmcb_save_area_cached *save = &svm->nested.save;
329 struct vcpu_svm *svm = to_svm(vcpu);
330 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
382 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
385 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
404 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
407 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
414 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
417 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
418 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
430 if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
431 !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
434 if (nested_vgif_enabled(svm))
437 if (nested_vnmi_enabled(svm))
440 svm->nested.ctl.int_ctl &= ~mask;
441 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
448 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
451 struct kvm_vcpu *vcpu = &svm->vcpu;
528 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
530 if (!svm->nested.vmcb02.ptr)
534 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
537 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
540 struct vmcb *vmcb01 = svm->vmcb01.ptr;
541 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
542 struct kvm_vcpu *vcpu = &svm->vcpu;
544 nested_vmcb02_compute_g_pat(svm);
547 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
549 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
550 svm->nested.force_msr_bitmap_recalc = true;
570 svm_set_efer(vcpu, svm->nested.save.efer);
572 svm_set_cr0(vcpu, svm->nested.save.cr0);
573 svm_set_cr4(vcpu, svm->nested.save.cr4);
575 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
588 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
589 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
594 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
601 svm_update_lbrv(&svm->vcpu);
632 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
639 struct kvm_vcpu *vcpu = &svm->vcpu;
640 struct vmcb *vmcb01 = svm->vmcb01.ptr;
641 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
651 (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
658 svm->vcpu.arch.nmi_pending++;
659 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
661 if (nested_vnmi_enabled(svm))
678 if (nested_npt_enabled(svm))
683 svm->nested.ctl.tsc_offset,
684 svm->tsc_ratio_msr);
689 svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
693 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
696 vmcb02->control.int_vector = svm->nested.ctl.int_vector;
697 vmcb02->control.int_state = svm->nested.ctl.int_state;
698 vmcb02->control.event_inj = svm->nested.ctl.event_inj;
699 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;
710 vmcb02->control.next_rip = svm->nested.ctl.next_rip;
714 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
716 svm->soft_int_injected = true;
717 svm->soft_int_csbase = vmcb12_csbase;
718 svm->soft_int_old_rip = vmcb12_rip;
720 svm->soft_int_next_rip = svm->nested.ctl.next_rip;
722 svm->soft_int_next_rip = vmcb12_rip;
729 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
731 if (!nested_vmcb_needs_vls_intercept(svm))
735 pause_count12 = svm->nested.ctl.pause_filter_count;
739 pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
742 if (kvm_pause_in_guest(svm->vcpu.kvm)) {
753 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
770 recalc_intercepts(svm);
788 struct vcpu_svm *svm = to_svm(vcpu);
791 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
809 svm->nested.vmcb12_gpa = vmcb12_gpa;
811 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
813 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
815 svm_switch_vmcb(svm, &svm->nested.vmcb02);
816 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
817 nested_vmcb02_prepare_save(svm, vmcb12);
819 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
820 nested_npt_enabled(svm), from_vmrun);
827 svm_set_gif(svm, true);
839 struct vcpu_svm *svm = to_svm(vcpu);
844 struct vmcb *vmcb01 = svm->vmcb01.ptr;
846 if (!svm->nested.hsave_msr) {
863 vmcb12_gpa = svm->vmcb->save.rax;
876 if (WARN_ON_ONCE(!svm->nested.initialized))
879 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
880 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
904 svm->nested.nested_run_pending = 1;
909 if (nested_svm_vmrun_msrpm(svm))
913 svm->nested.nested_run_pending = 0;
914 svm->nmi_l1_to_l2 = false;
915 svm->soft_int_injected = false;
917 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
918 svm->vmcb->control.exit_code_hi = 0;
919 svm->vmcb->control.exit_info_1 = 0;
920 svm->vmcb->control.exit_info_2 = 0;
922 nested_svm_vmexit(svm);
967 int nested_svm_vmexit(struct vcpu_svm *svm)
969 struct kvm_vcpu *vcpu = &svm->vcpu;
970 struct vmcb *vmcb01 = svm->vmcb01.ptr;
971 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
976 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
987 svm->nested.vmcb12_gpa = 0;
988 WARN_ON_ONCE(svm->nested.nested_run_pending);
993 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
1003 vmcb12->save.efer = svm->vcpu.arch.efer;
1007 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1013 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1023 nested_save_pending_event_to_vmcb12(svm, vmcb12);
1028 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1029 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
1030 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1038 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1040 svm_switch_vmcb(svm, &svm->vmcb01);
1061 if (!nested_exit_on_intr(svm))
1062 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1065 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1091 svm_set_gif(svm, false);
1094 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1095 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1096 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1106 svm->nested.ctl.nested_cr3 = 0;
1119 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1120 kvm_update_dr7(&svm->vcpu);
1143 svm->vcpu.arch.nmi_injected = false;
1154 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1168 struct vcpu_svm *svm = to_svm(vcpu);
1170 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1177 int svm_allocate_nested(struct vcpu_svm *svm)
1181 if (svm->nested.initialized)
1184 vmcb02_page = snp_safe_alloc_page(&svm->vcpu);
1187 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1188 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1190 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1191 if (!svm->nested.msrpm)
1193 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
1195 svm->nested.initialized = true;
1203 void svm_free_nested(struct vcpu_svm *svm)
1205 if (!svm->nested.initialized)
1208 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1209 svm_switch_vmcb(svm, &svm->vmcb01);
1211 svm_vcpu_free_msrpm(svm->nested.msrpm);
1212 svm->nested.msrpm = NULL;
1214 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1215 svm->nested.vmcb02.ptr = NULL;
1224 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1226 svm->nested.initialized = false;
1231 struct vcpu_svm *svm = to_svm(vcpu);
1234 svm->nested.nested_run_pending = 0;
1235 svm->nested.vmcb12_gpa = INVALID_GPA;
1239 svm_switch_vmcb(svm, &svm->vmcb01);
1242 vmcb_mark_all_dirty(svm->vmcb);
1251 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1256 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1259 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1261 write = svm->vmcb->control.exit_info_1 & 1;
1270 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1276 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1283 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1286 port = svm->vmcb->control.exit_info_1 >> 16;
1287 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1289 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1295 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1301 static int nested_svm_intercept(struct vcpu_svm *svm)
1303 u32 exit_code = svm->vmcb->control.exit_code;
1308 vmexit = nested_svm_exit_handled_msr(svm);
1311 vmexit = nested_svm_intercept_ioio(svm);
1314 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1319 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1337 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1345 int nested_svm_exit_handled(struct vcpu_svm *svm)
1349 vmexit = nested_svm_intercept(svm);
1352 nested_svm_vmexit(svm);
1375 struct vcpu_svm *svm = to_svm(vcpu);
1377 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1383 struct vcpu_svm *svm = to_svm(vcpu);
1384 struct vmcb *vmcb = svm->vmcb;
1413 nested_svm_vmexit(svm);
1416 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1418 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1424 struct vcpu_svm *svm = to_svm(vcpu);
1430 bool block_nested_exceptions = svm->nested.nested_run_pending;
1444 if (!nested_exit_on_init(svm))
1446 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1467 if (!nested_exit_on_smi(svm))
1469 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1477 if (!nested_exit_on_nmi(svm))
1479 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1486 if (!nested_exit_on_intr(svm))
1488 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1489 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1496 int nested_svm_exit_special(struct vcpu_svm *svm)
1498 u32 exit_code = svm->vmcb->control.exit_code;
1499 struct kvm_vcpu *vcpu = &svm->vcpu;
1509 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1513 svm->vcpu.arch.apf.host_apf_flags)
1534 struct vcpu_svm *svm = to_svm(vcpu);
1538 svm->tsc_ratio_msr);
1582 struct vcpu_svm *svm;
1591 &user_kvm_nested_state->data.svm[0];
1596 svm = to_svm(vcpu);
1603 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1607 if (svm->nested.nested_run_pending)
1611 if (gif_set(svm))
1631 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1638 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1649 struct vcpu_svm *svm = to_svm(vcpu);
1651 &user_kvm_nested_state->data.svm[0];
1686 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1690 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1742 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1744 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1746 svm->nested.nested_run_pending =
1749 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1751 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1752 nested_copy_vmcb_control_to_cache(svm, ctl);
1754 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1755 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1764 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1765 nested_npt_enabled(svm), false);
1769 svm->nested.force_msr_bitmap_recalc = true;
1782 struct vcpu_svm *svm = to_svm(vcpu);
1788 !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1797 if (!nested_svm_vmrun_msrpm(svm)) {
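
One pattern worth calling out from the listing above is the MSR permission bitmap merge around the listed lines 227-240: for each offset, L1's bitmap word (svm->msrpm[p]) is OR-ed with the word read from the map that vmcb12 points at, and the result (svm->nested.msrpm) becomes the map in force while L2 runs. The sketch below is illustrative only and not part of nested.c: merge_msrpm, msrpm01/msrpm12/msrpm02 and MSRPM_WORDS are invented names, and it merges the whole 8 KiB map in one pass instead of walking the kernel's precomputed offset list and reading L1's map from guest memory. A set bit means "intercept", so the union keeps every intercept that either L0 (on L1's behalf) or L1 (on L2's behalf) asked for.

    #include <stdint.h>
    #include <stdio.h>

    #define MSRPM_WORDS (8192 / sizeof(uint32_t))   /* 8 KiB MSR permission map as 32-bit words */

    /* Union of two MSR permission maps: a bit set in either input stays set. */
    static void merge_msrpm(uint32_t *msrpm02, const uint32_t *msrpm01,
                            const uint32_t *msrpm12)
    {
            for (size_t p = 0; p < MSRPM_WORDS; p++)
                    msrpm02[p] = msrpm01[p] | msrpm12[p];
    }

    int main(void)
    {
            static uint32_t msrpm01[MSRPM_WORDS], msrpm12[MSRPM_WORDS], msrpm02[MSRPM_WORDS];

            msrpm01[0] = 0x1;       /* an intercept L0/L1 already had in place */
            msrpm12[0] = 0x2;       /* an intercept requested by L1 via vmcb12 */
            merge_msrpm(msrpm02, msrpm01, msrpm12);
            printf("%#x\n", (unsigned)msrpm02[0]);  /* prints 0x3: both intercepts survive */
            return 0;
    }

The companion check at the listed lines 1251-1270 then consults L1's own map (read from guest memory at svm->nested.ctl.msrpm_base_pa) to decide whether a given intercepted MSR access is reflected to L1 or handled by L0.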