Deleted Added
full compact
svm.c (271939) svm.c (272195)
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271939 2014-09-21 23:42:54Z neel $");
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 272195 2014-09-27 02:04:58Z neel $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>

--- 84 unchanged lines hidden (view full) ---

121/* Current ASID generation for each host cpu */
122static struct asid asid[MAXCPU];
123
124/*
125 * SVM host state saved area of size 4KB for each core.
126 */
127static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
128
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>

--- 84 unchanged lines hidden (view full) ---

121/* Current ASID generation for each host cpu */
122static struct asid asid[MAXCPU];
123
124/*
125 * SVM host state saved area of size 4KB for each core.
126 */
127static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
128
129/*
130 * S/w saved host context.
131 */
132static struct svm_regctx host_ctx[MAXCPU];
133
134static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
135static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
136static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
137
138static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
139
140/*
141 * Common function to enable or disabled SVM for a CPU.

--- 532 unchanged lines hidden (view full) ---

674/*
675 * ins/outs utility routines
676 */
677static uint64_t
678svm_inout_str_index(struct svm_regctx *regs, int in)
679{
680 uint64_t val;
681
129static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
130static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
131static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
132
133static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
134
135/*
136 * Common function to enable or disabled SVM for a CPU.

--- 532 unchanged lines hidden (view full) ---

669/*
670 * ins/outs utility routines
671 */
672static uint64_t
673svm_inout_str_index(struct svm_regctx *regs, int in)
674{
675 uint64_t val;
676
682 val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;
677 val = in ? regs->sctx_rdi : regs->sctx_rsi;
683
684 return (val);
685}
686
687static uint64_t
688svm_inout_str_count(struct svm_regctx *regs, int rep)
689{
690 uint64_t val;

--- 460 unchanged lines hidden (view full) ---

1151 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
1152 else
1153 error = svm_rdmsr(sc, vcpu, num, &result, retu);
1154
1155 if (error == 0) {
1156 state = svm_get_vmcb_state(sc, vcpu);
1157 ctx = svm_get_guest_regctx(sc, vcpu);
1158 state->rax = result & 0xffffffff;
678
679 return (val);
680}
681
682static uint64_t
683svm_inout_str_count(struct svm_regctx *regs, int rep)
684{
685 uint64_t val;

--- 460 unchanged lines hidden (view full) ---

1146 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
1147 else
1148 error = svm_rdmsr(sc, vcpu, num, &result, retu);
1149
1150 if (error == 0) {
1151 state = svm_get_vmcb_state(sc, vcpu);
1152 ctx = svm_get_guest_regctx(sc, vcpu);
1153 state->rax = result & 0xffffffff;
1159 ctx->e.g.sctx_rdx = result >> 32;
1154 ctx->sctx_rdx = result >> 32;
1160 }
1161
1162 return (error);
1163}
1164
1165#ifdef KTR
1166static const char *
1167exit_reason_to_str(uint64_t reason)

--- 142 unchanged lines hidden (view full) ---

1310 handled = 1;
1311 break;
1312 case VMCB_EXIT_MC: /* machine check */
1313 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1314 break;
1315 case VMCB_EXIT_MSR: /* MSR access. */
1316 eax = state->rax;
1317 ecx = ctx->sctx_rcx;
1155 }
1156
1157 return (error);
1158}
1159
1160#ifdef KTR
1161static const char *
1162exit_reason_to_str(uint64_t reason)

--- 142 unchanged lines hidden (view full) ---

1305 handled = 1;
1306 break;
1307 case VMCB_EXIT_MC: /* machine check */
1308 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1309 break;
1310 case VMCB_EXIT_MSR: /* MSR access. */
1311 eax = state->rax;
1312 ecx = ctx->sctx_rcx;
1318 edx = ctx->e.g.sctx_rdx;
1313 edx = ctx->sctx_rdx;
1319 retu = false;
1320
1321 if (info1) {
1322 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1323 val = (uint64_t)edx << 32 | eax;
1324 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
1325 ecx, val);
1326 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {

--- 25 unchanged lines hidden (view full) ---

1352 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1353 break;
1354 case VMCB_EXIT_CPUID:
1355 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1356 handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
1357 (uint32_t *)&state->rax,
1358 (uint32_t *)&ctx->sctx_rbx,
1359 (uint32_t *)&ctx->sctx_rcx,
1314 retu = false;
1315
1316 if (info1) {
1317 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1318 val = (uint64_t)edx << 32 | eax;
1319 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
1320 ecx, val);
1321 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {

--- 25 unchanged lines hidden (view full) ---

1347 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1348 break;
1349 case VMCB_EXIT_CPUID:
1350 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1351 handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
1352 (uint32_t *)&state->rax,
1353 (uint32_t *)&ctx->sctx_rbx,
1354 (uint32_t *)&ctx->sctx_rcx,
1360 (uint32_t *)&ctx->e.g.sctx_rdx);
1355 (uint32_t *)&ctx->sctx_rdx);
1361 break;
1362 case VMCB_EXIT_HLT:
1363 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1364 vmexit->exitcode = VM_EXITCODE_HLT;
1365 vmexit->u.hlt.rflags = state->rflags;
1366 break;
1367 case VMCB_EXIT_PAUSE:
1368 vmexit->exitcode = VM_EXITCODE_PAUSE;

--- 401 unchanged lines hidden (view full) ---

1770
1771/*
1772 * Start vcpu with specified RIP.
1773 */
1774static int
1775svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1776 void *rend_cookie, void *suspended_cookie)
1777{
1356 break;
1357 case VMCB_EXIT_HLT:
1358 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1359 vmexit->exitcode = VM_EXITCODE_HLT;
1360 vmexit->u.hlt.rflags = state->rflags;
1361 break;
1362 case VMCB_EXIT_PAUSE:
1363 vmexit->exitcode = VM_EXITCODE_PAUSE;

--- 401 unchanged lines hidden (view full) ---

1765
1766/*
1767 * Start vcpu with specified RIP.
1768 */
1769static int
1770svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1771 void *rend_cookie, void *suspended_cookie)
1772{
1778 struct svm_regctx *hctx, *gctx;
1773 struct svm_regctx *gctx;
1779 struct svm_softc *svm_sc;
1780 struct svm_vcpu *vcpustate;
1781 struct vmcb_state *state;
1782 struct vmcb_ctrl *ctrl;
1783 struct vm_exit *vmexit;
1784 struct vlapic *vlapic;
1785 struct vm *vm;
1786 uint64_t vmcb_pa;

--- 14 unchanged lines hidden (view full) ---

1801 *
1802 * The per-cpu data area is not accessible until MSR_GSBASE is restored
1803 * after the #VMEXIT. Since VMRUN is executed inside a critical section
1804 * 'curcpu' and 'thiscpu' are guaranteed to identical.
1805 */
1806 thiscpu = curcpu;
1807
1808 gctx = svm_get_guest_regctx(svm_sc, vcpu);
1774 struct svm_softc *svm_sc;
1775 struct svm_vcpu *vcpustate;
1776 struct vmcb_state *state;
1777 struct vmcb_ctrl *ctrl;
1778 struct vm_exit *vmexit;
1779 struct vlapic *vlapic;
1780 struct vm *vm;
1781 uint64_t vmcb_pa;

--- 14 unchanged lines hidden (view full) ---

1796 *
1797 * The per-cpu data area is not accessible until MSR_GSBASE is restored
1798 * after the #VMEXIT. Since VMRUN is executed inside a critical section
1799 * 'curcpu' and 'thiscpu' are guaranteed to identical.
1800 */
1801 thiscpu = curcpu;
1802
1803 gctx = svm_get_guest_regctx(svm_sc, vcpu);
1809 hctx = &host_ctx[thiscpu];
1810 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1811
1812 if (vcpustate->lastcpu != thiscpu) {
1813 /*
1814 * Force new ASID allocation by invalidating the generation.
1815 */
1816 vcpustate->asid.gen = 0;
1817

--- 62 unchanged lines hidden (view full) ---

1880 check_asid(svm_sc, vcpu, pmap, thiscpu);
1881
1882 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
1883 vcpustate->dirty = 0;
1884 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
1885
1886 /* Launch Virtual Machine. */
1887 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
1804 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1805
1806 if (vcpustate->lastcpu != thiscpu) {
1807 /*
1808 * Force new ASID allocation by invalidating the generation.
1809 */
1810 vcpustate->asid.gen = 0;
1811

--- 62 unchanged lines hidden (view full) ---

1874 check_asid(svm_sc, vcpu, pmap, thiscpu);
1875
1876 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
1877 vcpustate->dirty = 0;
1878 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
1879
1880 /* Launch Virtual Machine. */
1881 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
1888 svm_launch(vmcb_pa, gctx, hctx);
1882 svm_launch(vmcb_pa, gctx);
1889
1890 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
1891
1892 /*
1893 * Restore MSR_GSBASE to point to the pcpu data area.
1894 *
1895 * Note that accesses done via PCPU_GET/PCPU_SET will work
1896 * only after MSR_GSBASE is restored.

--- 48 unchanged lines hidden (view full) ---

1945{
1946
1947 switch (reg) {
1948 case VM_REG_GUEST_RBX:
1949 return (&regctx->sctx_rbx);
1950 case VM_REG_GUEST_RCX:
1951 return (&regctx->sctx_rcx);
1952 case VM_REG_GUEST_RDX:
1883
1884 CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
1885
1886 /*
1887 * Restore MSR_GSBASE to point to the pcpu data area.
1888 *
1889 * Note that accesses done via PCPU_GET/PCPU_SET will work
1890 * only after MSR_GSBASE is restored.

--- 48 unchanged lines hidden (view full) ---

1939{
1940
1941 switch (reg) {
1942 case VM_REG_GUEST_RBX:
1943 return (&regctx->sctx_rbx);
1944 case VM_REG_GUEST_RCX:
1945 return (&regctx->sctx_rcx);
1946 case VM_REG_GUEST_RDX:
1953 return (&regctx->e.g.sctx_rdx);
1947 return (&regctx->sctx_rdx);
1954 case VM_REG_GUEST_RDI:
1948 case VM_REG_GUEST_RDI:
1955 return (&regctx->e.g.sctx_rdi);
1949 return (&regctx->sctx_rdi);
1956 case VM_REG_GUEST_RSI:
1950 case VM_REG_GUEST_RSI:
1957 return (&regctx->e.g.sctx_rsi);
1951 return (&regctx->sctx_rsi);
1958 case VM_REG_GUEST_RBP:
1959 return (&regctx->sctx_rbp);
1960 case VM_REG_GUEST_R8:
1961 return (&regctx->sctx_r8);
1962 case VM_REG_GUEST_R9:
1963 return (&regctx->sctx_r9);
1964 case VM_REG_GUEST_R10:
1965 return (&regctx->sctx_r10);

--- 186 unchanged lines hidden ---
1952 case VM_REG_GUEST_RBP:
1953 return (&regctx->sctx_rbp);
1954 case VM_REG_GUEST_R8:
1955 return (&regctx->sctx_r8);
1956 case VM_REG_GUEST_R9:
1957 return (&regctx->sctx_r9);
1958 case VM_REG_GUEST_R10:
1959 return (&regctx->sctx_r10);

--- 186 unchanged lines hidden ---