--- svm.c (271912)
+++ svm.c (271939)
/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
--- 11 unchanged lines hidden ---
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271912 2014-09-20 21:46:31Z neel $");
+__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271939 2014-09-21 23:42:54Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
--- 47 unchanged lines hidden ---
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */

#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID	|	\
				VMCB_CACHE_IOPM		|	\
				VMCB_CACHE_I		|	\
				VMCB_CACHE_TPR		|	\
+				VMCB_CACHE_CR2		|	\
+				VMCB_CACHE_CR		|	\
+				VMCB_CACHE_DT		|	\
+				VMCB_CACHE_SEG		|	\
				VMCB_CACHE_NP)

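For context on this hunk: AMD's "VMCB clean bits" let the processor reuse its cached copy of a VMCB field across VMRUNs instead of reloading it from memory. VMCB_CACHE_DEFAULT enumerates the fields this driver is willing to let the CPU cache (this revision adds CR2, the control registers, the descriptor tables, and the segment registers), and the vmcb_clean tunable added just below lets that set be restricted at boot. A minimal standalone sketch of the arithmetic, with illustrative bit positions rather than the real vmcb.h definitions:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the VMCB_CACHE_* bits. */
	#define CACHE_I    (1u << 0)	/* intercept vectors */
	#define CACHE_IOPM (1u << 1)	/* I/O and MSR permission maps */
	#define CACHE_ASID (1u << 2)	/* address space id */
	#define CACHE_TPR  (1u << 3)	/* V_TPR, V_IRQ, ... */

	int
	main(void)
	{
		uint32_t vmcb_clean = CACHE_I | CACHE_IOPM | CACHE_ASID |
		    CACHE_TPR;
		uint32_t dirty = 0;

		/* An intercept bit was toggled: mark that field dirty. */
		dirty |= CACHE_I;

		/*
		 * Before VMRUN, a field may be declared "clean" (the CPU may
		 * reuse its cached copy) only if caching it is permitted
		 * *and* the hypervisor has not modified it since the last
		 * VMRUN.
		 */
		uint32_t clean = vmcb_clean & ~dirty;
		printf("vmcb_clean field: %#x\n", (unsigned)clean);

		dirty = 0;	/* everything marked clean is re-cached */
		return (0);
	}

The same expression appears later in svm_vmrun, where ctrl->vmcb_clean is now computed from the tunable instead of the compile-time constant.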
+static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
+SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
+    0, NULL);
+
MALLOC_DEFINE(M_SVM, "svm", "svm");
MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

-static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc);
-
static uint32_t svm_feature;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

--- 14 unchanged lines hidden ---
/*
 * S/w saved host context.
 */
static struct svm_regctx host_ctx[MAXCPU];

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

+static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
+
/*
 * Common function to enable or disabled SVM for a CPU.
 */
static int
cpu_svm_enable_disable(boolean_t enable)
{
	uint64_t efer_msr;

--- 147 unchanged lines hidden ---
svm_init(int ipinum)
{
	int err, cpu;

	err = is_svm_enabled();
	if (err)
		return (err);

+	vmcb_clean &= VMCB_CACHE_DEFAULT;
+
	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will rollover both 'gen' and 'num'
		 * and start off the sequence at {1,1}.
		 */
		asid[cpu].gen = ~0UL;
--- 102 unchanged lines hidden ---
}

static int
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{
	return svm_msr_perm(perm_bitmap, msr, true, false);
}

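A note on the ASID initialization in the loop above: starting each host ASID at {gen = ~0UL, num = nasid - 1} means the very first allocation overflows both counters, so the sequence begins cleanly at {1,1} and generation 0 can stay reserved to mean "never allocated" in the per-vcpu state. A standalone sketch of that rollover, with an invented nasid value (the real limit comes from CPUID) and a simplified struct layout:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct asid {
		uint64_t gen;	/* valid range is [1, ~0] */
		uint32_t num;	/* valid range is [1, nasid - 1] */
	};

	static uint32_t nasid = 8;	/* pretend the CPU offers 8 ASIDs */
	static struct asid host_asid;

	static void
	asid_init(void)
	{
		/* "Highest" values so the first allocation rolls to {1,1}. */
		host_asid.gen = ~(uint64_t)0;
		host_asid.num = nasid - 1;
	}

	static void
	asid_alloc(bool *new_generation)
	{
		*new_generation = false;
		if (++host_asid.num >= nasid) {
			host_asid.num = 1;
			if (++host_asid.gen == 0)	/* 0 is reserved */
				host_asid.gen = 1;
			*new_generation = true;
		}
	}

	int
	main(void)
	{
		bool bumped;

		asid_init();
		asid_alloc(&bumped);	/* first allocation: {1,1} */
		printf("gen %llu num %u bumped %d\n",
		    (unsigned long long)host_asid.gen, host_asid.num, bumped);
		return (0);
	}

When the generation bumps, every previously handed-out ASID becomes stale at once, which is what the check_asid logic further down keys off before choosing between a full TLB flush and flush-by-asid.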
-static __inline void
-vcpu_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
-{
-	struct svm_vcpu *vcpustate;
-
-	vcpustate = svm_get_vcpu(sc, vcpu);
-
-	vcpustate->dirty |= dirtybits;
-}
-
static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
--- 13 unchanged lines hidden ---
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
-		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_I);
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{
--- 126 unchanged lines hidden ---
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);

	/* For Nested Paging/RVI only. */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

-	/*
-	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
-	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));

	/* Cache physical address for multiple vcpus. */
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
--- 19 unchanged lines hidden ---
	 * from any segment DPL"
	 */
	return (state->cpl);
}

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
-	struct vmcb_segment *seg;
+	struct vmcb_segment seg;
	struct vmcb_state *state;
+	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
-		seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
+		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
+		    error));
+
		/*
		 * Section 4.8.1 for APM2, check if Code Segment has
		 * Long attribute set in descriptor.
		 */
-		if (seg->attrib & VMCB_CS_ATTRIB_L)
+		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
--- 45 unchanged lines hidden ---
	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

-	error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
+	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	uint32_t size;

--- 107 unchanged lines hidden ---

	return (true);
}

static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
-	struct vmcb_segment *seg;
+	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
-	int inst_len;
+	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

-	seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
+	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
+	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
+
	switch(paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
-		vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
+		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
--- 4 unchanged lines hidden ---
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}

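The cs_d value computed in the hunk above is what the instruction-emulation code uses to pick the default operand size. A standalone sketch of that decision, with illustrative bit positions (APMv2 section 4.8.1 is the reference, not this file):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define CS_ATTRIB_L (1u << 9)	/* long-mode code segment */
	#define CS_ATTRIB_D (1u << 10)	/* default operand-size bit */

	/* Returns the default operand size, in bits, for a code segment. */
	static int
	default_operand_size(bool efer_lma, uint16_t cs_attrib)
	{
		if (efer_lma && (cs_attrib & CS_ATTRIB_L))
			return (32);	/* 64-bit mode defaults to 32 */
		return ((cs_attrib & CS_ATTRIB_D) ? 32 : 16);
	}

	int
	main(void)
	{
		printf("compat, D=1: %d-bit\n",
		    default_operand_size(true, CS_ATTRIB_D));
		printf("compat, D=0: %d-bit\n",
		    default_operand_size(true, 0));
		printf("64-bit (L=1): %d-bit\n",
		    default_operand_size(true, CS_ATTRIB_L));
		return (0);
	}

This also explains why the switch only consults the D bit in protected and compatibility modes: in 64-bit mode the L bit governs and D is required to be clear.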
-/*
- * Intercept access to MSR_EFER to prevent the guest from clearing the
- * SVM enable bit.
- */
-static int
-svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t val)
-{
-	struct vmcb_state *state;
-	uint64_t oldval;
-
-	state = svm_get_vmcb_state(sc, vcpu);
-
-	oldval = state->efer;
-	state->efer = val | EFER_SVM;
-	if (state->efer != oldval) {
-		VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
-		    oldval, state->efer);
-		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
-	}
-	return (0);
-}
-
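A note on this removal: svm_write_efer existed solely to keep a guest from clearing EFER_SVM out from under the hypervisor, the key line being

	state->efer = val | EFER_SVM;

With the MSR_EFER case of emulate_wrmsr now routed through svm_setreg (see that hunk below), the same invariant presumably has to be enforced behind the common vmcb_write path; it is worth verifying that the OR with EFER_SVM, and the VMCB_CACHE_CR dirty marking, survived the move.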
#ifdef KTR
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
--- 125 unchanged lines hidden ---
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
-	vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

--- 8 unchanged lines hidden ---
#ifdef KTR
	if (ctrl->v_intr_vector == 0)
		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	else
		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
#endif
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
-	vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}

static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;
--- 74 unchanged lines hidden ---
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
-		error = svm_write_efer(sc, vcpu, val);
+		error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
--- 461 unchanged lines hidden ---
	 * VMRUN.
	 */
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
-		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}

	if (pending_apic_vector) {
		/*
		 * If an APIC vector is being injected then interrupt window
		 * exiting is not possible on this VMRUN.
		 */
		KASSERT(!need_intr_window, ("intr_window exiting impossible"));
		VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
		    pending_apic_vector);

		ctrl->v_irq = 1;
		ctrl->v_ign_tpr = 0;
		ctrl->v_intr_vector = pending_apic_vector;
		ctrl->v_intr_prio = pending_apic_vector >> 4;
-		vcpu_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	} else if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be delivered.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
--- 109 unchanged lines hidden ---
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
-		vcpu_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
+		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
--- 49 unchanged lines hidden ---
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
-		vcpu_set_dirty(svm_sc, vcpu, 0xffffffff);
+		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
--- 44 unchanged lines hidden ---
		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, thiscpu);

-		ctrl->vmcb_clean = VMCB_CACHE_DEFAULT & ~vcpustate->dirty;
+		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_launch(vmcb_pa, gctx, hctx);

		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
--- 93 unchanged lines hidden ---
/*
 * Interface to read guest registers.
 * This can be SVM h/w saved or hypervisor saved register.
 */
static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
-	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
-	vmcb = svm_get_vmcb(svm_sc, vcpu);

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

-	if (vmcb_read(vmcb, ident, val) == 0) {
+	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
--- 6 unchanged lines hidden ---
/*
 * Interface to write to guest registers.
 * This can be SVM h/w saved or hypervisor saved register.
 */
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
-	struct vmcb *vmcb;
	register_t *reg;

	svm_sc = arg;
-	vmcb = svm_get_vmcb(svm_sc, vcpu);

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

-	if (vmcb_write(vmcb, ident, val) == 0) {
+	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
--- 4 unchanged lines hidden ---
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
	return (EINVAL);
}

-
-/*
- * Inteface to set various descriptors.
- */
static int
-svm_setdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
-{
-	struct svm_softc *svm_sc;
-	struct vmcb *vmcb;
-	struct vmcb_segment *seg;
-	uint16_t attrib;
-
-	svm_sc = arg;
-	vmcb = svm_get_vmcb(svm_sc, vcpu);
-
-	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);
-
-	seg = vmcb_seg(vmcb, type);
-	if (seg == NULL) {
-		ERR("SVM_ERR:Unsupported segment type%d\n", type);
-		return (EINVAL);
-	}
-
-	/* Map seg_desc access to VMCB attribute format.*/
-	attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
-	VCPU_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
-	    type, desc->access, desc->limit);
-	seg->attrib = attrib;
-	seg->base = desc->base;
-	seg->limit = desc->limit;
-
-	return (0);
-}
-
-/*
- * Interface to get guest descriptor.
- */
-static int
-svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc)
-{
-	struct svm_softc *svm_sc;
-	struct vmcb_segment *seg;
-
-	svm_sc = arg;
-	VCPU_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);
-
-	seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
-	if (!seg) {
-		ERR("SVM_ERR:Unsupported segment type%d\n", type);
-		return (EINVAL);
-	}
-
-	/* Map seg_desc access to VMCB attribute format.*/
-	desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
-	desc->base = seg->base;
-	desc->limit = seg->limit;
-
-	/*
-	 * VT-x uses bit 16 (Unusable) to indicate a segment that has been
-	 * loaded with a NULL segment selector. The 'desc->access' field is
-	 * interpreted in the VT-x format by the processor-independent code.
-	 *
-	 * SVM uses the 'P' bit to convey the same information so convert it
-	 * into the VT-x format. For more details refer to section
-	 * "Segment State in the VMCB" in APMv2.
-	 */
-	if (type == VM_REG_GUEST_CS && type == VM_REG_GUEST_TR)
-		desc->access |= 0x80;	/* CS and TS always present */
-
-	if (!(desc->access & 0x80))
-		desc->access |= 0x10000;	/* Unusable segment */
-
-	return (0);
-}
-
-static int
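One observation on the removed svm_getdesc, mostly relevant if vmcb_getdesc inherited its body: the test

	if (type == VM_REG_GUEST_CS && type == VM_REG_GUEST_TR)

is always false, since type cannot equal both constants at once, so the "always present" fix-up for CS and TR was dead code; the intended operator was presumably ||. The comment's "TS" also looks like a typo for "TR".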
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
--- 74 unchanged lines hidden ---
	svm_init,
	svm_cleanup,
	svm_restore,
	svm_vminit,
	svm_vmrun,
	svm_vmcleanup,
	svm_getreg,
	svm_setreg,
-	svm_getdesc,
-	svm_setdesc,
+	vmcb_getdesc,
+	vmcb_setdesc,
	svm_getcap,
	svm_setcap,
	svm_npt_alloc,
	svm_npt_free,
	svm_vlapic_init,
	svm_vlapic_cleanup
};