svm.c, projects/bhyve_svm branch: r271715 vs r271912. Deleted lines carry their r271715 line numbers, added lines carry their r271912 line numbers, and unchanged context is shown once.
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271715 2014-09-17 18:46:51Z neel $");
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 271912 2014-09-20 21:46:31Z neel $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>

--- 12 unchanged lines hidden ---

49#include <machine/smp.h>
50#include <machine/vmm.h>
51#include <machine/vmm_dev.h>
52#include <machine/vmm_instruction_emul.h>
53
54#include <x86/apicreg.h>
55
56#include "vmm_lapic.h"
57#include "vmm_msr.h"
58#include "vmm_stat.h"
59#include "vmm_ktr.h"
60#include "vmm_ioport.h"
61#include "vatpic.h"
62#include "vlapic.h"
63#include "vlapic_priv.h"
64
65#include "x86.h"
66#include "vmcb.h"
67#include "svm.h"
68#include "svm_softc.h"
68#include "svm_msr.h"
69#include "npt.h"
70
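The generic vmm_msr layer is dropped here in favour of an AMD-specific MSR module. The declarations below are a sketch of the svm_msr.h interface as it can be inferred from the call sites added later in this diff; the actual header may declare things differently (the void return types in particular are assumptions).

	/* Inferred svm_msr.h interface (sketch, not the real header). */
	struct svm_softc;

	void	svm_msr_init(void);
	void	svm_msr_guest_init(struct svm_softc *sc, int vcpu);
	void	svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
	void	svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
	int	svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num,
		    uint64_t *result, bool *retu);
	int	svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num,
		    uint64_t val, bool *retu);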
71SYSCTL_DECL(_hw_vmm);
72SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);
73
74/*
75 * SVM CPUID function 0x8000_000A, edx bit decoding.
76 */

--- 221 unchanged lines hidden ---

298 *
299 * The next ASID allocation will rollover both 'gen' and 'num'
300 * and start off the sequence at {1,1}.
301 */
302 asid[cpu].gen = ~0UL;
303 asid[cpu].num = nasid - 1;
304 }
305
306 svm_msr_init();
306 svm_npt_init(ipinum);
307
308 /* Start SVM on all CPUs */
309 smp_rendezvous(NULL, svm_enable, NULL, NULL);
310
311 return (0);
312}
313

--- 287 unchanged lines hidden ---

601 msrpm_pa = vtophys(svm_sc->msr_bitmap);
602 pml4_pa = svm_sc->nptp;
603
604 for (i = 0; i < VM_MAXCPU; i++) {
605 vcpu = svm_get_vcpu(svm_sc, i);
606 vcpu->lastcpu = NOCPU;
607 vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
608 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
610 svm_msr_guest_init(svm_sc, i);
609 }
610 return (svm_sc);
611}
612
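Taken together with the svm_msr_init() call added to svm_init() above, VM creation now also sets up per-vcpu MSR state: each vcpu gets svm_msr_guest_init() right after its VMCB is initialized.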
613static int
614svm_cpl(struct vmcb_state *state)
615{
616

--- 245 unchanged lines hidden ---

862 }
863 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
864}
865
866/*
867 * Intercept access to MSR_EFER to prevent the guest from clearing the
868 * SVM enable bit.
869 */
870static void
871svm_write_efer(struct svm_softc *sc, int vcpu, uint32_t edx, uint32_t eax)
872static int
873svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t val)
872{
873 struct vmcb_state *state;
874 uint64_t oldval;
875
876 state = svm_get_vmcb_state(sc, vcpu);
877
878 oldval = state->efer;
879 state->efer = (uint64_t)edx << 32 | eax | EFER_SVM;
881 state->efer = val | EFER_SVM;
880 if (state->efer != oldval) {
881 VCPU_CTR2(sc->vm, vcpu, "Guest EFER changed from %#lx to %#lx",
882 oldval, state->efer);
883 vcpu_set_dirty(sc, vcpu, VMCB_CACHE_CR);
884 }
887 return (0);
885}
886
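svm_write_efer() now takes the already-assembled 64-bit value and returns a status, so it can be dispatched from the new emulate_wrmsr() below like any other MSR handler, and EFER_SVM is forced back on no matter what the guest writes. A worked example with illustrative values:

	/* Guest wrmsr to MSR_EFER with EDX:EAX = 0x0:0x500 (LMA|LME set, SVM clear). */
	val = (uint64_t)edx << 32 | eax;	/* val == 0x500 */
	error = svm_write_efer(sc, vcpu, val);	/* state->efer == 0x1500: bit 12 (SVM) stays set */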
887#ifdef KTR
888static const char *
889intrtype_to_str(int intr_type)
890{
891 switch (intr_type) {
892 case VMCB_EVENTINJ_TYPE_INTR:

--- 234 unchanged lines hidden (view full) ---

1127 /*
1128 * Set 'intr_shadow' to prevent an NMI from being injected on the
1129 * immediate VMRUN.
1130 */
1131 error = svm_modify_intr_shadow(sc, vcpu, 1);
1132 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
1133}
1134
1138static int
1139emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
1140 bool *retu)
1141{
1142 int error;
1143
1144 if (lapic_msr(num))
1145 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
1146 else if (num == MSR_EFER)
1147 error = svm_write_efer(sc, vcpu, val);
1148 else
1149 error = svm_wrmsr(sc, vcpu, num, val, retu);
1150
1151 return (error);
1152}
1153
1154static int
1155emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
1156{
1157 struct vmcb_state *state;
1158 struct svm_regctx *ctx;
1159 uint64_t result;
1160 int error;
1161
1162 if (lapic_msr(num))
1163 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
1164 else
1165 error = svm_rdmsr(sc, vcpu, num, &result, retu);
1166
1167 if (error == 0) {
1168 state = svm_get_vmcb_state(sc, vcpu);
1169 ctx = svm_get_guest_regctx(sc, vcpu);
1170 state->rax = result & 0xffffffff;
1171 ctx->e.g.sctx_rdx = result >> 32;
1172 }
1173
1174 return (error);
1175}
1176
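These two helpers centralize MSR dispatch for the #VMEXIT handler: local APIC MSRs are routed to the vlapic emulation, a write to MSR_EFER goes through svm_write_efer() above, and everything else falls through to svm_rdmsr()/svm_wrmsr() in the new svm_msr.c. On a successful read the 64-bit result is split back into guest RAX/RDX exactly as the hardware would report it; for example a result of 0x1122334455667788 leaves RAX = 0x55667788 and RDX = 0x11223344.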
1135#ifdef KTR
1136static const char *
1137exit_reason_to_str(uint64_t reason)
1138{
1139 static char reasonbuf[32];
1140
1141 switch (reason) {
1142 case VMCB_EXIT_INVALID:

--- 140 unchanged lines hidden ---

1283 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1284 break;
1285 case VMCB_EXIT_MSR: /* MSR access. */
1286 eax = state->rax;
1287 ecx = ctx->sctx_rcx;
1288 edx = ctx->e.g.sctx_rdx;
1289 retu = false;
1290
1291 if (ecx == MSR_EFER) {
1292 KASSERT(info1 != 0, ("rdmsr(MSR_EFER) is not emulated: "
1293 "info1(%#lx) info2(%#lx)", info1, info2));
1294 svm_write_efer(svm_sc, vcpu, edx, eax);
1295 handled = 1;
1296 break;
1297 }
1298
1299#define MSR_AMDK8_IPM 0xc0010055
1300 /*
1301 * Ignore access to the "Interrupt Pending Message" MSR.
1302 */
1303 if (ecx == MSR_AMDK8_IPM) {
1304 if (!info1)
1305 state->rax = ctx->e.g.sctx_rdx = 0;
1306 handled = 1;
1307 break;
1308 }
1309
1310 if (info1) {
1311 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1312 val = (uint64_t)edx << 32 | eax;
1313 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
1314 ecx, val);
1315 if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val, &retu)) {
1338 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1316 vmexit->exitcode = VM_EXITCODE_WRMSR;
1317 vmexit->u.msr.code = ecx;
1318 vmexit->u.msr.wval = val;
1319 } else if (!retu) {
1320 handled = 1;
1321 } else {
1322 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1323 ("emulate_wrmsr retu with bogus exitcode"));
1324 }
1325 } else {
1326 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
1327 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
1328 if (emulate_rdmsr(svm_sc->vm, vcpu, ecx, &retu)) {
1351 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
1329 vmexit->exitcode = VM_EXITCODE_RDMSR;
1330 vmexit->u.msr.code = ecx;
1331 } else if (!retu) {
1332 handled = 1;
1333 } else {
1334 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1335 ("emulate_rdmsr retu with bogus exitcode"));
1336 }
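With emulate_wrmsr()/emulate_rdmsr() doing the dispatch, the MSR_EFER fast path (and its KASSERT that only writes were intercepted) and the MSR_AMDK8_IPM ignore-quirk are deleted from this case: EFER writes now reach svm_write_efer() through emulate_wrmsr(), while the IPM quirk is no longer handled here at all (if it moved into svm_msr.c, that side is not part of this diff).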

--- 481 unchanged lines hidden ---

1818 *
1819 * This works for now but any new side-effects of vcpu
1820 * migration should take this case into account.
1821 */
1822 vcpustate->lastcpu = thiscpu;
1823 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
1824 }
1825
1849 svm_msr_guest_enter(svm_sc, vcpu);
1850
1826 /* Update Guest RIP */
1827 state->rip = rip;
1828
1829 do {
1830 /*
1831 * Disable global interrupts to guarantee atomicity during
1832 * loading of guest state. This includes not only the state
1833 * loaded by the "vmrun" instruction but also software state

--- 65 unchanged lines hidden ---

1899
1900 /* #VMEXIT disables interrupts so re-enable them here. */
1901 enable_gintr();
1902
1903 /* Handle #VMEXIT and if required return to user space. */
1904 handled = svm_vmexit(svm_sc, vcpu, vmexit);
1905 } while (handled);
1906
1932 svm_msr_guest_exit(svm_sc, vcpu);
1933
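svm_msr_guest_enter()/svm_msr_guest_exit() now bracket the vmrun loop, so guest MSR state is installed before the first VMRUN and the host's values are restored before svm_vmrun() returns. A minimal sketch of what such a pair typically does; the field names and the choice of MSR_LSTAR are illustrative assumptions, since the real implementation lives in svm_msr.c and is not shown in this diff:

	/* Hypothetical sketch only; see svm_msr.c for the real code. */
	void
	svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
	{
		struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpu);

		vcpustate->host_lstar = rdmsr(MSR_LSTAR);	/* assumed field */
		wrmsr(MSR_LSTAR, vcpustate->guest_lstar);	/* assumed field */
	}

	void
	svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
	{
		struct svm_vcpu *vcpustate = svm_get_vcpu(sc, vcpu);

		vcpustate->guest_lstar = rdmsr(MSR_LSTAR);	/* keep the guest's value */
		wrmsr(MSR_LSTAR, vcpustate->host_lstar);	/* restore the host value */
	}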
1907 return (0);
1908}
1909
1910/*
1911 * Cleanup for virtual machine.
1912 */
1913static void
1914svm_vmcleanup(void *arg)

--- 301 unchanged lines hidden ---