svm.c (272926) → svm.c (272929)
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 272926 2014-10-11 03:09:34Z neel $");
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 272929 2014-10-11 04:41:21Z neel $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>
37#include <sys/sysctl.h>
38
39#include <vm/vm.h>
40#include <vm/pmap.h>
41
42#include <machine/cpufunc.h>
43#include <machine/psl.h>
44#include <machine/pmap.h>
45#include <machine/md_var.h>
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>
37#include <sys/sysctl.h>
38
39#include <vm/vm.h>
40#include <vm/pmap.h>
41
42#include <machine/cpufunc.h>
43#include <machine/psl.h>
44#include <machine/pmap.h>
45#include <machine/md_var.h>
46#include <machine/vmparam.h>
47#include <machine/specialreg.h>
46#include <machine/specialreg.h>
48#include <machine/segments.h>
49#include <machine/smp.h>
50#include <machine/vmm.h>
47#include <machine/smp.h>
48#include <machine/vmm.h>
51#include <machine/vmm_dev.h>
52#include <machine/vmm_instruction_emul.h>
53
49#include <machine/vmm_instruction_emul.h>
50
54#include <x86/apicreg.h>
55
56#include "vmm_lapic.h"
57#include "vmm_stat.h"
58#include "vmm_ktr.h"
59#include "vmm_ioport.h"
60#include "vatpic.h"
61#include "vlapic.h"
62#include "vlapic_priv.h"
63

--- 30 unchanged lines hidden ---

94 VMCB_CACHE_DT | \
95 VMCB_CACHE_SEG | \
96 VMCB_CACHE_NP)
97
98static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
99SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
100 0, NULL);
101
51#include "vmm_lapic.h"
52#include "vmm_stat.h"
53#include "vmm_ktr.h"
54#include "vmm_ioport.h"
55#include "vatpic.h"
56#include "vlapic.h"
57#include "vlapic_priv.h"
58

--- 30 unchanged lines hidden ---

89 VMCB_CACHE_DT | \
90 VMCB_CACHE_SEG | \
91 VMCB_CACHE_NP)
92
93static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
94SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
95 0, NULL);
96
102MALLOC_DEFINE(M_SVM, "svm", "svm");
103MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
97static MALLOC_DEFINE(M_SVM, "svm", "svm");
98static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
104
105/* Per-CPU context area. */
106extern struct pcpu __pcpu[];
107
108static uint32_t svm_feature; /* AMD SVM features. */
109SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
110 "SVM features advertised by CPUID.8000000AH:EDX");
111

--- 15 unchanged lines hidden ---

127static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
128
129static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
130static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
131static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
132
133static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
134
99
100/* Per-CPU context area. */
101extern struct pcpu __pcpu[];
102
103static uint32_t svm_feature; /* AMD SVM features. */
104SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
105 "SVM features advertised by CPUID.8000000AH:EDX");
106

--- 15 unchanged lines hidden ---

122static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
123
124static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
125static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
126static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
127
128static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
129
135/*
136 * Common function to enable or disabled SVM for a CPU.
137 */
138static int
139cpu_svm_enable_disable(boolean_t enable)
130static __inline int
131flush_by_asid(void)
140{
132{
141 uint64_t efer_msr;
142
133
143 efer_msr = rdmsr(MSR_EFER);
134 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
135}
144
136
145 if (enable)
146 efer_msr |= EFER_SVM;
147 else
148 efer_msr &= ~EFER_SVM;
137static __inline int
138decode_assist(void)
139{
149
140
150 wrmsr(MSR_EFER, efer_msr);
151
152 return(0);
141 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
153}
154
142}
143
155/*
156 * Disable SVM on a CPU.
157 */
158static void
159svm_disable(void *arg __unused)
160{
144static void
145svm_disable(void *arg __unused)
146{
147 uint64_t efer;
161
148
162 (void)cpu_svm_enable_disable(FALSE);
149 efer = rdmsr(MSR_EFER);
150 efer &= ~EFER_SVM;
151 wrmsr(MSR_EFER, efer);
163}
164
165/*
152}
153
154/*
166 * Disable SVM for all CPUs.
155 * Disable SVM on all CPUs.
167 */
168static int
169svm_cleanup(void)
170{
171
172 smp_rendezvous(NULL, svm_disable, NULL, NULL);
173 return (0);
174}

--- 44 unchanged lines hidden ---

219 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
220 printf("SVM: NRIP Save feature not available.\n");
221 return (ENXIO);
222 }
223
224 return (0);
225}
226
156 */
157static int
158svm_cleanup(void)
159{
160
161 smp_rendezvous(NULL, svm_disable, NULL, NULL);
162 return (0);
163}

--- 44 unchanged lines hidden ---

208 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
209 printf("SVM: NRIP Save feature not available.\n");
210 return (ENXIO);
211 }
212
213 return (0);
214}
215
227static __inline int
228flush_by_asid(void)
229{
230
231 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
232}
233
234static __inline int
235decode_assist(void)
236{
237
238 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
239}
240
241/*
242 * Enable SVM for a CPU.
243 */
244static void
245svm_enable(void *arg __unused)
246{
216static void
217svm_enable(void *arg __unused)
218{
247 uint64_t hsave_pa;
219 uint64_t efer;
248
220
249 (void)cpu_svm_enable_disable(TRUE);
221 efer = rdmsr(MSR_EFER);
222 efer |= EFER_SVM;
223 wrmsr(MSR_EFER, efer);
250
224
251 hsave_pa = vtophys(hsave[curcpu]);
252 wrmsr(MSR_VM_HSAVE_PA, hsave_pa);
253
254 if (rdmsr(MSR_VM_HSAVE_PA) != hsave_pa) {
255 panic("VM_HSAVE_PA is wrong on CPU%d\n", curcpu);
256 }
225 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
257}
258
259/*
226}
227
228/*
260 * Verify that SVM is enabled and the processor has all the required features.
229 * Return 1 if SVM is enabled on this processor and 0 otherwise.
261 */
262static int
230 */
231static int
263is_svm_enabled(void)
232svm_available(void)
264{
265 uint64_t msr;
266
267 /* Section 15.4 Enabling SVM from APM2. */
268 if ((amd_feature2 & AMDID2_SVM) == 0) {
269 printf("SVM: not available.\n");
233{
234 uint64_t msr;
235
236 /* Section 15.4 Enabling SVM from APM2. */
237 if ((amd_feature2 & AMDID2_SVM) == 0) {
238 printf("SVM: not available.\n");
270 return (ENXIO);
239 return (0);
271 }
272
273 msr = rdmsr(MSR_VM_CR);
274 if ((msr & VM_CR_SVMDIS) != 0) {
275 printf("SVM: disabled by BIOS.\n");
240 }
241
242 msr = rdmsr(MSR_VM_CR);
243 if ((msr & VM_CR_SVMDIS) != 0) {
244 printf("SVM: disabled by BIOS.\n");
276 return (ENXIO);
245 return (0);
277 }
278
246 }
247
279 return (check_svm_features());
248 return (1);
280}
281
249}
250
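
/*
 * [Editor's illustration, not part of this commit] A minimal userland
 * sketch of the CPUID probing that svm_available() and
 * check_svm_features() depend on: CPUID Fn8000_0001 ECX bit 2 advertises
 * SVM (AMDID2_SVM), and CPUID Fn8000_000A EDX carries the feature bits
 * cached in 'svm_feature' and tested by flush_by_asid()/decode_assist().
 * The bit positions below follow the AMD APM and are assumptions here,
 * not values taken from this diff.
 */
#include <stdio.h>
#include <cpuid.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Fn8000_0001: ECX bit 2 set means the CPU supports SVM. */
	if (!__get_cpuid(0x80000001U, &eax, &ebx, &ecx, &edx) ||
	    (ecx & (1U << 2)) == 0) {
		printf("SVM: not available\n");
		return (1);
	}

	/* Fn8000_000A: EBX is the number of ASIDs, EDX the SVM features. */
	if (!__get_cpuid(0x8000000AU, &eax, &ebx, &ecx, &edx))
		return (1);

	printf("ASIDs:          %u\n", ebx);
	printf("nested paging:  %d\n", !!(edx & 0x01));
	printf("NRIP save:      %d\n", !!(edx & 0x08));	/* required above */
	printf("flush-by-ASID:  %d\n", !!(edx & 0x40));
	printf("decode assist:  %d\n", !!(edx & 0x80));
	return (0);
}
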
282/*
283 * Enable SVM on CPU and initialize nested page table h/w.
284 */
285static int
286svm_init(int ipinum)
287{
251static int
252svm_init(int ipinum)
253{
288 int err, cpu;
254 int error, cpu;
289
255
290 err = is_svm_enabled();
291 if (err)
292 return (err);
256 if (!svm_available())
257 return (ENXIO);
293
258
259 error = check_svm_features();
260 if (error)
261 return (error);
262
294 vmcb_clean &= VMCB_CACHE_DEFAULT;
295
296 for (cpu = 0; cpu < MAXCPU; cpu++) {
297 /*
298 * Initialize the host ASIDs to their "highest" valid values.
299 *
300 * The next ASID allocation will rollover both 'gen' and 'num'
301 * and start off the sequence at {1,1}.
302 */
303 asid[cpu].gen = ~0UL;
304 asid[cpu].num = nasid - 1;
305 }
306
307 svm_msr_init();
308 svm_npt_init(ipinum);
309
263 vmcb_clean &= VMCB_CACHE_DEFAULT;
264
265 for (cpu = 0; cpu < MAXCPU; cpu++) {
266 /*
267 * Initialize the host ASIDs to their "highest" valid values.
268 *
269 * The next ASID allocation will rollover both 'gen' and 'num'
270 * and start off the sequence at {1,1}.
271 */
272 asid[cpu].gen = ~0UL;
273 asid[cpu].num = nasid - 1;
274 }
275
276 svm_msr_init();
277 svm_npt_init(ipinum);
278
310 /* Start SVM on all CPUs */
279 /* Enable SVM on all CPUs */
311 smp_rendezvous(NULL, svm_enable, NULL, NULL);
312
313 return (0);
314}
315
316static void
317svm_restore(void)
318{
280 smp_rendezvous(NULL, svm_enable, NULL, NULL);
281
282 return (0);
283}
284
285static void
286svm_restore(void)
287{
288
319 svm_enable(NULL);
320}
321
289 svm_enable(NULL);
290}
291
322/*
323 * Get index and bit position for a MSR in MSR permission
324 * bitmap. Two bits are used for each MSR, lower bit is
325 * for read and higher bit is for write.
326 */
327static int
328svm_msr_index(uint64_t msr, int *index, int *bit)
329{
330 uint32_t base, off;
331
332/* Pentium compatible MSRs */
333#define MSR_PENTIUM_START 0
334#define MSR_PENTIUM_END 0x1FFF
335/* AMD 6th generation and Intel compatible MSRs */
336#define MSR_AMD6TH_START 0xC0000000UL
337#define MSR_AMD6TH_END 0xC0001FFFUL
338/* AMD 7th and 8th generation compatible MSRs */
339#define MSR_AMD7TH_START 0xC0010000UL
340#define MSR_AMD7TH_END 0xC0011FFFUL
341
292/* Pentium compatible MSRs */
293#define MSR_PENTIUM_START 0
294#define MSR_PENTIUM_END 0x1FFF
295/* AMD 6th generation and Intel compatible MSRs */
296#define MSR_AMD6TH_START 0xC0000000UL
297#define MSR_AMD6TH_END 0xC0001FFFUL
298/* AMD 7th and 8th generation compatible MSRs */
299#define MSR_AMD7TH_START 0xC0010000UL
300#define MSR_AMD7TH_END 0xC0011FFFUL
301
302/*
303 * Get the index and bit position for a MSR in permission bitmap.
304 * Two bits are used for each MSR: lower bit for read and higher bit for write.
305 */
306static int
307svm_msr_index(uint64_t msr, int *index, int *bit)
308{
309 uint32_t base, off;
310
342 *index = -1;
343 *bit = (msr % 4) * 2;
344 base = 0;
345
346 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
347 *index = msr / 4;
348 return (0);
349 }

--- 7 unchanged lines hidden ---

357
358 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
359 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
360 off = (msr - MSR_AMD7TH_START);
361 *index = (off + base) / 4;
362 return (0);
363 }
364
311 *index = -1;
312 *bit = (msr % 4) * 2;
313 base = 0;
314
315 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
316 *index = msr / 4;
317 return (0);
318 }

--- 7 unchanged lines hidden ---

326
327 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
328 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
329 off = (msr - MSR_AMD7TH_START);
330 *index = (off + base) / 4;
331 return (0);
332 }
333
365 return (EIO);
334 return (EINVAL);
366}
367
368/*
335}
336
337/*
369 * Give virtual cpu the complete access to MSR(read & write).
338 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
370 */
339 */
371static int
340static void
372svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
373{
341svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
342{
374 int index, bit, err;
343 int index, bit, error;
375
344
376 err = svm_msr_index(msr, &index, &bit);
377 if (err) {
378 ERR("MSR 0x%lx is not writeable by guest.\n", msr);
379 return (err);
380 }
345 error = svm_msr_index(msr, &index, &bit);
346 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
347 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
348 ("%s: invalid index %d for msr %#lx", __func__, index, msr));
349 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
350 "msr %#lx", __func__, bit, msr));
381
351
382 if (index < 0 || index > (SVM_MSR_BITMAP_SIZE)) {
383 ERR("MSR 0x%lx index out of range(%d).\n", msr, index);
384 return (EINVAL);
385 }
386 if (bit < 0 || bit > 8) {
387 ERR("MSR 0x%lx bit out of range(%d).\n", msr, bit);
388 return (EINVAL);
389 }
390
391 /* Disable intercept for read and write. */
392 if (read)
393 perm_bitmap[index] &= ~(1UL << bit);
352 if (read)
353 perm_bitmap[index] &= ~(1UL << bit);
354
394 if (write)
395 perm_bitmap[index] &= ~(2UL << bit);
355 if (write)
356 perm_bitmap[index] &= ~(2UL << bit);
396 CTR2(KTR_VMM, "Guest has control:0x%x on SVM:MSR(0x%lx).\n",
397 (perm_bitmap[index] >> bit) & 0x3, msr);
398
399 return (0);
400}
401
357}
358
402static int
359static void
403svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
404{
360svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
361{
405 return svm_msr_perm(perm_bitmap, msr, true, true);
362
363 svm_msr_perm(perm_bitmap, msr, true, true);
406}
407
364}
365
408static int
366static void
409svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
410{
367svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
368{
411 return svm_msr_perm(perm_bitmap, msr, true, false);
369
370 svm_msr_perm(perm_bitmap, msr, true, false);
412}
413
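
/*
 * [Editor's illustration, not part of this commit] Worked example of the
 * MSR permission-bitmap addressing implemented by svm_msr_index() and
 * svm_msr_perm() above.  Every MSR owns two adjacent bits: the lower bit
 * intercepts reads, the higher bit intercepts writes, and a cleared bit
 * lets the guest access the MSR directly.  The per-range offsets follow
 * the MSRPM layout in the AMD APM and are assumed here, since the middle
 * of svm_msr_index() is elided in this view.
 */
#include <stdio.h>
#include <stdint.h>

static int
msrpm_locate(uint64_t msr, int *index, int *bit)
{
	uint64_t base = 0;

	*bit = (msr % 4) * 2;			/* two bits per MSR */

	if (msr <= 0x1FFFULL) {			/* Pentium-compatible MSRs */
		*index = msr / 4;
		return (0);
	}
	base += 0x2000;				/* MSRs covered so far */
	if (msr >= 0xC0000000ULL && msr <= 0xC0001FFFULL) {
		*index = (base + (msr - 0xC0000000ULL)) / 4;
		return (0);
	}
	base += 0x2000;
	if (msr >= 0xC0010000ULL && msr <= 0xC0011FFFULL) {
		*index = (base + (msr - 0xC0010000ULL)) / 4;
		return (0);
	}
	return (-1);				/* outside the bitmap */
}

int
main(void)
{
	int index, bit;

	/* MSR_GSBASE (0xC0000101): byte 0x840, read bit 2, write bit 3. */
	if (msrpm_locate(0xC0000101ULL, &index, &bit) == 0)
		printf("index=%#x read bit=%d write bit=%d\n",
		    (unsigned)index, bit, bit + 1);
	return (0);
}
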
414static __inline int
415svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
416{
417 struct vmcb_ctrl *ctrl;
418
419 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));

--- 24 unchanged lines hidden ---

444 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
445 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
446 }
447}
448
449static __inline void
450svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
451{
371}
372
373static __inline int
374svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
375{
376 struct vmcb_ctrl *ctrl;
377
378 KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));

--- 24 unchanged lines hidden ---

403 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
404 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
405 }
406}
407
408static __inline void
409svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
410{
411
452 svm_set_intercept(sc, vcpu, off, bitmask, 0);
453}
454
455static __inline void
456svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
457{
412 svm_set_intercept(sc, vcpu, off, bitmask, 0);
413}
414
415static __inline void
416svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
417{
418
458 svm_set_intercept(sc, vcpu, off, bitmask, 1);
459}
460
461static void
462vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
463 uint64_t msrpm_base_pa, uint64_t np_pml4)
464{
465 struct vmcb_ctrl *ctrl;

--- 71 unchanged lines hidden ---

537 PAT_VALUE(3, PAT_UNCACHEABLE) |
538 PAT_VALUE(4, PAT_WRITE_BACK) |
539 PAT_VALUE(5, PAT_WRITE_THROUGH) |
540 PAT_VALUE(6, PAT_UNCACHED) |
541 PAT_VALUE(7, PAT_UNCACHEABLE);
542}
543
544/*
419 svm_set_intercept(sc, vcpu, off, bitmask, 1);
420}
421
422static void
423vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
424 uint64_t msrpm_base_pa, uint64_t np_pml4)
425{
426 struct vmcb_ctrl *ctrl;

--- 71 unchanged lines hidden ---

498 PAT_VALUE(3, PAT_UNCACHEABLE) |
499 PAT_VALUE(4, PAT_WRITE_BACK) |
500 PAT_VALUE(5, PAT_WRITE_THROUGH) |
501 PAT_VALUE(6, PAT_UNCACHED) |
502 PAT_VALUE(7, PAT_UNCACHEABLE);
503}
504
505/*
545 * Initialise a virtual machine.
506 * Initialize a virtual machine.
546 */
547static void *
548svm_vminit(struct vm *vm, pmap_t pmap)
549{
550 struct svm_softc *svm_sc;
551 struct svm_vcpu *vcpu;
552 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
553 int i;
554
507 */
508static void *
509svm_vminit(struct vm *vm, pmap_t pmap)
510{
511 struct svm_softc *svm_sc;
512 struct svm_vcpu *vcpu;
513 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
514 int i;
515
555 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
556 M_SVM, M_WAITOK | M_ZERO);
557
516 svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO);
558 svm_sc->vm = vm;
559 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
560
561 /*
517 svm_sc->vm = vm;
518 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
519
520 /*
562 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc.
563 */
564 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));
521 * Intercept read and write accesses to all MSRs.
522 */
523 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));
565
566 /*
524
525 /*
567 * Following MSR can be completely controlled by virtual machines
568 * since access to following are translated to access to VMCB.
526 * Access to the following MSRs is redirected to the VMCB when the
527 * guest is executing. Therefore it is safe to allow the guest to
528 * read/write these MSRs directly without hypervisor involvement.
569 */
570 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
571 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
572 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
573
574 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
575 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
576 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
577 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
578 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
579 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
580 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
529 */
530 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
531 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
532 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
533
534 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
535 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
536 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
537 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
538 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
539 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
540 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
581
582 /* For Nested Paging/RVI only. */
583 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
584
585 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
586
587 /*
588 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
589 */
590 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
591
541 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
542
543 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
544
545 /*
546 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
547 */
548 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
549
592 /* Intercept access to all I/O ports. */
550 /* Intercept access to all I/O ports. */
593 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));
594
551 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));
552
595 /* Cache physical address for multiple vcpus. */
596 iopm_pa = vtophys(svm_sc->iopm_bitmap);
597 msrpm_pa = vtophys(svm_sc->msr_bitmap);
598 pml4_pa = svm_sc->nptp;
553 iopm_pa = vtophys(svm_sc->iopm_bitmap);
554 msrpm_pa = vtophys(svm_sc->msr_bitmap);
555 pml4_pa = svm_sc->nptp;
599
600 for (i = 0; i < VM_MAXCPU; i++) {
601 vcpu = svm_get_vcpu(svm_sc, i);
602 vcpu->lastcpu = NOCPU;
603 vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
604 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
605 svm_msr_guest_init(svm_sc, i);
606 }
607 return (svm_sc);

--- 178 unchanged lines hidden ---

786 svm_inout_str_seginfo(svm_sc, vcpu, info1,
787 vmexit->u.inout.in, vis);
788 }
789
790 return (UNHANDLED);
791}
792
793static int
556 for (i = 0; i < VM_MAXCPU; i++) {
557 vcpu = svm_get_vcpu(svm_sc, i);
558 vcpu->lastcpu = NOCPU;
559 vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
560 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
561 svm_msr_guest_init(svm_sc, i);
562 }
563 return (svm_sc);

--- 178 unchanged lines hidden ---

742 svm_inout_str_seginfo(svm_sc, vcpu, info1,
743 vmexit->u.inout.in, vis);
744 }
745
746 return (UNHANDLED);
747}
748
749static int
794svm_npf_paging(uint64_t exitinfo1)
750npf_fault_type(uint64_t exitinfo1)
795{
796
797 if (exitinfo1 & VMCB_NPF_INFO1_W)
798 return (VM_PROT_WRITE);
751{
752
753 if (exitinfo1 & VMCB_NPF_INFO1_W)
754 return (VM_PROT_WRITE);
799
800 return (VM_PROT_READ);
755 else
756 return (VM_PROT_READ);
801}
802
803static bool
804svm_npf_emul_fault(uint64_t exitinfo1)
805{
806
807 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
808 return (false);

--- 554 unchanged lines hidden ---

1363 /* EXITINFO2 contains the faulting guest physical address */
1364 if (info1 & VMCB_NPF_INFO1_RSV) {
1365 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
1366 "reserved bits set: info1(%#lx) info2(%#lx)",
1367 info1, info2);
1368 } else if (vm_mem_allocated(svm_sc->vm, info2)) {
1369 vmexit->exitcode = VM_EXITCODE_PAGING;
1370 vmexit->u.paging.gpa = info2;
757}
758
759static bool
760svm_npf_emul_fault(uint64_t exitinfo1)
761{
762
763 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
764 return (false);

--- 554 unchanged lines hidden ---

1319 /* EXITINFO2 contains the faulting guest physical address */
1320 if (info1 & VMCB_NPF_INFO1_RSV) {
1321 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
1322 "reserved bits set: info1(%#lx) info2(%#lx)",
1323 info1, info2);
1324 } else if (vm_mem_allocated(svm_sc->vm, info2)) {
1325 vmexit->exitcode = VM_EXITCODE_PAGING;
1326 vmexit->u.paging.gpa = info2;
1371 vmexit->u.paging.fault_type = svm_npf_paging(info1);
1327 vmexit->u.paging.fault_type = npf_fault_type(info1);
1372 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1373 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
1374 "on gpa %#lx/%#lx at rip %#lx",
1375 info2, info1, state->rip);
1376 } else if (svm_npf_emul_fault(info1)) {
1377 svm_handle_inst_emul(vmcb, info2, vmexit);
1378 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
1379 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "

--- 374 unchanged lines hidden ---

1754 }
1755 vcpustate->eptgen = eptgen;
1756
1757 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
1758 KASSERT(ctrl->asid == vcpustate->asid.num,
1759 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
1760}
1761
1328 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1329 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
1330 "on gpa %#lx/%#lx at rip %#lx",
1331 info2, info1, state->rip);
1332 } else if (svm_npf_emul_fault(info1)) {
1333 svm_handle_inst_emul(vmcb, info2, vmexit);
1334 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
1335 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "

--- 374 unchanged lines hidden ---

1710 }
1711 vcpustate->eptgen = eptgen;
1712
1713 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
1714 KASSERT(ctrl->asid == vcpustate->asid.num,
1715 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
1716}
1717
1718static __inline void
1719disable_gintr(void)
1720{
1721
1722 __asm __volatile("clgi" : : :);
1723}
1724
1725static __inline void
1726enable_gintr(void)
1727{
1728
1729 __asm __volatile("stgi" : : :);
1730}
1731
1762/*
1763 * Start vcpu with specified RIP.
1764 */
1765static int
1766svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1767 void *rend_cookie, void *suspended_cookie)
1768{
1769 struct svm_regctx *gctx;

--- 137 unchanged lines hidden ---

1907 handled = svm_vmexit(svm_sc, vcpu, vmexit);
1908 } while (handled);
1909
1910 svm_msr_guest_exit(svm_sc, vcpu);
1911
1912 return (0);
1913}
1914
1732/*
1733 * Start vcpu with specified RIP.
1734 */
1735static int
1736svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1737 void *rend_cookie, void *suspended_cookie)
1738{
1739 struct svm_regctx *gctx;

--- 137 unchanged lines hidden ---

1877 handled = svm_vmexit(svm_sc, vcpu, vmexit);
1878 } while (handled);
1879
1880 svm_msr_guest_exit(svm_sc, vcpu);
1881
1882 return (0);
1883}
1884
1915/*
1916 * Cleanup for virtual machine.
1917 */
1918static void
1919svm_vmcleanup(void *arg)
1920{
1885static void
1886svm_vmcleanup(void *arg)
1887{
1921 struct svm_softc *svm_sc;
1888 struct svm_softc *sc = arg;
1922
1889
1923 svm_sc = arg;
1924
1925 VCPU_CTR0(svm_sc->vm, 0, "SVM:cleanup\n");
1926
1927 free(svm_sc, M_SVM);
1890 free(sc, M_SVM);
1928}
1929
1891}
1892
1930/*
1931 * Return pointer to hypervisor saved register state.
1932 */
1933static register_t *
1934swctx_regptr(struct svm_regctx *regctx, int reg)
1935{
1936
1937 switch (reg) {
1893static register_t *
1894swctx_regptr(struct svm_regctx *regctx, int reg)
1895{
1896
1897 switch (reg) {
1938 case VM_REG_GUEST_RBX:
1939 return (&regctx->sctx_rbx);
1940 case VM_REG_GUEST_RCX:
1941 return (&regctx->sctx_rcx);
1942 case VM_REG_GUEST_RDX:
1943 return (&regctx->sctx_rdx);
1944 case VM_REG_GUEST_RDI:
1945 return (&regctx->sctx_rdi);
1946 case VM_REG_GUEST_RSI:
1947 return (&regctx->sctx_rsi);
1948 case VM_REG_GUEST_RBP:
1949 return (&regctx->sctx_rbp);
1950 case VM_REG_GUEST_R8:
1951 return (&regctx->sctx_r8);
1952 case VM_REG_GUEST_R9:
1953 return (&regctx->sctx_r9);
1954 case VM_REG_GUEST_R10:
1955 return (&regctx->sctx_r10);
1956 case VM_REG_GUEST_R11:
1957 return (&regctx->sctx_r11);
1958 case VM_REG_GUEST_R12:
1959 return (&regctx->sctx_r12);
1960 case VM_REG_GUEST_R13:
1961 return (&regctx->sctx_r13);
1962 case VM_REG_GUEST_R14:
1963 return (&regctx->sctx_r14);
1964 case VM_REG_GUEST_R15:
1965 return (&regctx->sctx_r15);
1966 default:
1967 ERR("Unknown register requested, reg=%d.\n", reg);
1968 break;
1898 case VM_REG_GUEST_RBX:
1899 return (®ctx->sctx_rbx);
1900 case VM_REG_GUEST_RCX:
1901 return (®ctx->sctx_rcx);
1902 case VM_REG_GUEST_RDX:
1903 return (®ctx->sctx_rdx);
1904 case VM_REG_GUEST_RDI:
1905 return (®ctx->sctx_rdi);
1906 case VM_REG_GUEST_RSI:
1907 return (®ctx->sctx_rsi);
1908 case VM_REG_GUEST_RBP:
1909 return (®ctx->sctx_rbp);
1910 case VM_REG_GUEST_R8:
1911 return (®ctx->sctx_r8);
1912 case VM_REG_GUEST_R9:
1913 return (®ctx->sctx_r9);
1914 case VM_REG_GUEST_R10:
1915 return (®ctx->sctx_r10);
1916 case VM_REG_GUEST_R11:
1917 return (®ctx->sctx_r11);
1918 case VM_REG_GUEST_R12:
1919 return (®ctx->sctx_r12);
1920 case VM_REG_GUEST_R13:
1921 return (®ctx->sctx_r13);
1922 case VM_REG_GUEST_R14:
1923 return (®ctx->sctx_r14);
1924 case VM_REG_GUEST_R15:
1925 return (®ctx->sctx_r15);
1926 default:
1927 return (NULL);
1969 }
1928 }
1970
1971 return (NULL);
1972}
1973
1929}
1930
1974/*
1975 * Interface to read guest registers.
1976 * This can be SVM h/w saved or hypervisor saved register.
1977 */
1978static int
1979svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
1980{
1981 struct svm_softc *svm_sc;
1982 register_t *reg;
1983
1984 svm_sc = arg;
1985

--- 7 unchanged lines hidden ---

1993
1994 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
1995
1996 if (reg != NULL) {
1997 *val = *reg;
1998 return (0);
1999 }
2000
1931static int
1932svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
1933{
1934 struct svm_softc *svm_sc;
1935 register_t *reg;
1936
1937 svm_sc = arg;
1938

--- 7 unchanged lines hidden ---

1946
1947 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
1948
1949 if (reg != NULL) {
1950 *val = *reg;
1951 return (0);
1952 }
1953
2001 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
1954 VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
2002 return (EINVAL);
2003}
2004
1955 return (EINVAL);
1956}
1957
2005/*
2006 * Interface to write to guest registers.
2007 * This can be SVM h/w saved or hypervisor saved register.
2008 */
2009static int
2010svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
2011{
2012 struct svm_softc *svm_sc;
2013 register_t *reg;
2014
2015 svm_sc = arg;
2016

--- 13 unchanged lines hidden ---

2030 }
2031
2032 /*
2033 * XXX deal with CR3 and invalidate TLB entries tagged with the
2034 * vcpu's ASID. This needs to be treated differently depending on
2035 * whether 'running' is true/false.
2036 */
2037
1958static int
1959svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
1960{
1961 struct svm_softc *svm_sc;
1962 register_t *reg;
1963
1964 svm_sc = arg;
1965

--- 13 unchanged lines hidden ---

1979 }
1980
1981 /*
1982 * XXX deal with CR3 and invalidate TLB entries tagged with the
1983 * vcpu's ASID. This needs to be treated differently depending on
1984 * whether 'running' is true/false.
1985 */
1986
2038 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
1987 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
2039 return (EINVAL);
2040}
2041
2042static int
2043svm_setcap(void *arg, int vcpu, int type, int val)
2044{
2045 struct svm_softc *sc;
2046 int error;

--- 57 unchanged lines hidden ---

2104
2105 svm_sc = arg;
2106 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
2107 vlapic->vm = svm_sc->vm;
2108 vlapic->vcpuid = vcpuid;
2109 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2110
2111 vlapic_init(vlapic);
1988 return (EINVAL);
1989}
1990
1991static int
1992svm_setcap(void *arg, int vcpu, int type, int val)
1993{
1994 struct svm_softc *sc;
1995 int error;

--- 57 unchanged lines hidden ---

2053
2054 svm_sc = arg;
2055 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
2056 vlapic->vm = svm_sc->vm;
2057 vlapic->vcpuid = vcpuid;
2058 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2059
2060 vlapic_init(vlapic);
2112
2061
2113 return (vlapic);
2114}
2115
2116static void
2117svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2118{
2119
2120 vlapic_cleanup(vlapic);

--- 21 unchanged lines hidden ---
2062 return (vlapic);
2063}
2064
2065static void
2066svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2067{
2068
2069 vlapic_cleanup(vlapic);

--- 21 unchanged lines hidden ---