svm.c (256867) vs svm.c (259579)
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 256867 2013-10-21 23:46:37Z neel $");
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 259579 2013-12-18 23:39:42Z grehan $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>

--- 19 unchanged lines hidden ---

56
57#include "x86.h"
58#include "vmcb.h"
59#include "svm.h"
60#include "svm_softc.h"
61#include "npt.h"
62
63/*
64 * SVM CPUID function 0x8000_000Ai, edx bit decoding.
64 * SVM CPUID function 0x8000_000A, edx bit decoding.
65 */
66#define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */
67#define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */
68#define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */
69#define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */
70#define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */
71#define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */
72#define AMD_CPUID_SVM_ASID_FLUSH BIT(6) /* Flush by ASID */
73#define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */
74#define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */
75#define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */
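
/*
 * Illustrative sketch, not part of either revision: how the feature bits
 * above could be probed, assuming FreeBSD's do_cpuid() from
 * <machine/cpufunc.h>.  The file's real probing lives in the hidden
 * is_svm_enabled()/svm_init() paths.
 */
static uint32_t
svm_cpuid_edx(void)
{
	u_int regs[4];

	/* Leaf 0x8000_000A: EAX = SVM revision, EBX = NASID, EDX = features. */
	do_cpuid(0x8000000A, regs);
	return (regs[3]);	/* test against the AMD_CPUID_SVM_* bits above */
}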
76
77MALLOC_DEFINE(M_SVM, "svm", "svm");
78
79/* Per-CPU context area. */
80extern struct pcpu __pcpu[];
81
82static int svm_vmexit(struct svm_softc *svm_sc, int vcpu,
82static bool svm_vmexit(struct svm_softc *svm_sc, int vcpu,
83 struct vm_exit *vmexit);
84static int svm_msr_rw_ok(uint8_t *btmap, uint64_t msr);
85static int svm_msr_index(uint64_t msr, int *index, int *bit);
86
87static uint32_t svm_feature; /* AMD SVM features. */
88
89/*
90 * Starting guest ASID, 0 is reserved for host.
91 * Each guest will have its own unique ASID.
92 */
93static uint32_t guest_asid = 1;
94
95/*
96 * Max ASID the processor can support.
97 * This limits the maximum number of virtual machines that can be created.
98 */
99static int max_asid;
100
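
/*
 * Illustrative sketch, not part of either revision: the allocation policy
 * the two comments above imply.  ASID 0 belongs to the host, guests take
 * 1..max_asid-1, and ASIDs are never recycled in this revision.
 */
static int
svm_asid_alloc_example(uint32_t *asidp)
{
	if (guest_asid >= max_asid)
		return (ENOSPC);	/* out of hardware ASIDs */
	*asidp = guest_asid++;
	return (0);
}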
101/*
102 * Statistics
103 */
104static VMM_STAT_AMD(VMEXIT_NPF_LAPIC, "vm exits due to Local APIC access");
105
106/*
107 * SVM host state saved area of size 4KB for each core.
108 */
109static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
110
111/*
112 * S/w saved host context.
113 */
114static struct svm_regctx host_ctx[MAXCPU];
115
101/*
102 * SVM host state saved area of size 4KB for each core.
103 */
104static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
105
106/*
107 * S/w saved host context.
108 */
109static struct svm_regctx host_ctx[MAXCPU];
110
111static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid EXITINTINFO");
112
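
/*
 * Illustrative sketch, not part of either revision: before VMRUN can be
 * executed, each core must point the VM_HSAVE_PA MSR (0xC001_0117 in the
 * AMD APM) at its 4KB host-save page; the hidden svm_enable() presumably
 * does the equivalent of this.
 */
static void
svm_set_hsave_example(void)
{
	wrmsr(0xC0010117, vtophys(hsave[curcpu]));	/* MSR_VM_HSAVE_PA */
}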
116/*
117 * Common function to enable or disable SVM for a CPU.
118 */
119static int
120cpu_svm_enable_disable(boolean_t enable)
121{
122 uint64_t efer_msr;
123
124 efer_msr = rdmsr(MSR_EFER);
125
126 if (enable) {
123 if (enable)
127 efer_msr |= EFER_SVM;
128 } else {
125 else
129 efer_msr &= ~EFER_SVM;
130 }
131
132 wrmsr(MSR_EFER, efer_msr);
133
134 if(rdmsr(MSR_EFER) != efer_msr) {
135 ERR("SVM couldn't be enabled on CPU%d.\n", curcpu);
136 return (EIO);
137 }
138
139 return(0);
140}
141
142/*
143 * Disable SVM on a CPU.
144 */
145static void
146svm_disable(void *arg __unused)

--- 47 unchanged lines hidden ---

194
195 /* SVM Lock */
196 if (!(svm_feature & AMD_CPUID_SVM_SVML)) {
197 printf("SVM is disabled by BIOS, please enable in BIOS.\n");
198 return (ENXIO);
199 }
200
201 /*
202 * XXX: BHyVe need EPT or RVI to work.
193 * bhyve needs RVI to work.
203 */
204 if (!(svm_feature & AMD_CPUID_SVM_NP)) {
205 printf("Missing Nested paging or RVI SVM support in processor.\n");
206 return (EIO);
207 }
208
209 if (svm_feature & (AMD_CPUID_SVM_NRIP_SAVE |
210 AMD_CPUID_SVM_DECODE_ASSIST)) {
200 if (svm_feature & AMD_CPUID_SVM_NRIP_SAVE)
211 return (0);
201 return (0);
212 }
213 /* XXX: Should never be here? */
214 printf("Processor doesn't support nRIP or decode assist, can't"
215 "run BhyVe.\n");
202
216 return (EIO);
217}
218
219/*
220 * Enable SVM for a CPU.
221 */
222static void
223svm_enable(void *arg __unused)

--- 38 unchanged lines hidden ---

262 * Enable SVM on CPU and initialize nested page table h/w.
263 */
264static int
265svm_init(void)
266{
267 int err;
268
269 err = is_svm_enabled();
270 if (err) {
257 if (err)
271 return (err);
258 return (err);
272 }
259
273
274 svm_npt_init();
275
276 /* Start SVM on all CPUs */
277 smp_rendezvous(NULL, svm_enable, NULL, NULL);
278
279 return(0);
266 return (0);
280}
281
282/*
283 * Get index and bit position for a MSR in MSR permission
284 * bitmap. Two bits are used for each MSR, lower bit is
285 * for read and higher bit is for write.
286 */
287static int

--- 90 unchanged lines hidden ---

378
379 return (0);
380}
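
/*
 * Illustrative sketch, not part of either revision: the two-bits-per-MSR
 * layout described above, for the low MSR range only.  The hidden
 * svm_msr_index() additionally remaps the 0xC000_0000 and 0xC001_0000 MSR
 * ranges into later sections of the permission bitmap.
 */
static void
msr_bitmap_pos_example(uint64_t msr, int *idx, int *bit)
{
	*idx = (int)((msr * 2) / 8);	/* byte offset in the bitmap */
	*bit = (int)((msr * 2) % 8);	/* low bit: read; bit + 1: write */
}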
381
382/*
383 * Initialise a virtual machine.
384 */
385static void *
386svm_vminit(struct vm *vm)
373svm_vminit(struct vm *vm, pmap_t pmap)
387{
388 struct svm_softc *svm_sc;
389 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
390 int i;
391
392 if (guest_asid >= max_asid) {
393 ERR("Host supports max ASID:%d, can't create more guests.\n",
394 max_asid);
395 return (NULL);
396 }
397
398 svm_sc = (struct svm_softc *)malloc(sizeof (struct svm_softc),
399 M_SVM, M_WAITOK | M_ZERO);
400
401 svm_sc->vm = vm;
402 svm_sc->svm_feature = svm_feature;
403 svm_sc->vcpu_cnt = VM_MAXCPU;
404
391 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
405 /*
406 * Each guest has its own unique ASID.
407 * ASID(Addres Space Identifier) are used by TLB entries.
394 * ASID(Address Space Identifier) is used by TLB entry.
408 */
409 svm_sc->asid = guest_asid++;
410
411 /*
412 * Intercept MSR access to all MSRs except GSBASE, FSBASE,... etc.
413 */
414 memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));
415

--- 17 unchanged lines hidden ---

433 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
434
435 /* Intercept access to all I/O ports. */
436 memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));
437
438 /* Cache physical address for multiple vcpus. */
439 iopm_pa = vtophys(svm_sc->iopm_bitmap);
440 msrpm_pa = vtophys(svm_sc->msr_bitmap);
441 pml4_pa = vtophys(svm_sc->np_pml4);
428 pml4_pa = svm_sc->nptp;
442
443 for (i = 0; i < svm_sc->vcpu_cnt; i++) {
444 if (svm_init_vcpu(svm_get_vcpu(svm_sc, i), iopm_pa, msrpm_pa,
445 pml4_pa, svm_sc->asid)) {
446 ERR("SVM couldn't initialise VCPU%d\n", i);
447 goto cleanup;
448 }
449 }
450
451 return (svm_sc);
452
453cleanup:
454 free(svm_sc, M_SVM);
455 return (NULL);
456}
457
458/*
459 * Handle guest I/O intercept.
460 */
461static int
448static bool
462svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
463{
464 struct vmcb_ctrl *ctrl;
465 struct vmcb_state *state;
466 uint64_t info1;
467
468 state = svm_get_vmcb_state(svm_sc, vcpu);
469 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
470 info1 = ctrl->exitinfo1;
471
472 vmexit->exitcode = VM_EXITCODE_INOUT;
473 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
474 vmexit->u.inout.string = (info1 & BIT(2)) ? 1 : 0;
475 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
476 vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
477 vmexit->u.inout.port = (uint16_t)(info1 >> 16);
478 vmexit->u.inout.eax = (uint32_t)(state->rax);
479
480 return (1);
467 return (false);
481}
482
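/*
 * Worked example, not part of either revision: decoding a concrete
 * EXITINFO1 value with the bit layout used by svm_handle_io() above.  For
 * a guest "inb $0x80, %al", EXITINFO1 would be 0x00800011: bit 0 set (IN),
 * one-hot size field in bits 6:4 equal to 1 (byte access), port 0x80 in
 * bits 31:16.
 */
static void
svm_io_decode_example(void)
{
	uint64_t info1 = 0x00800011ULL;

	KASSERT(info1 & BIT(0), ("expected an IN"));
	KASSERT(((info1 >> 4) & 0x7) == 1, ("expected a 1-byte access"));
	KASSERT((uint16_t)(info1 >> 16) == 0x80, ("expected port 0x80"));
}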
483/*
484 * SVM Nested Page(RVI) Fault handler.
485 * Nested page fault handler used by local APIC emulation.
486 */
487static int
488svm_handle_npf(struct vm *vm, int vcpu, uint64_t gpa, uint64_t rip,
489 uint64_t exitinfo1, uint64_t cr3, struct vie *vie)
470static void
471svm_npf_paging(uint64_t exitinfo1, int *type, int *prot)
490{
472{
491 int err;
492
473
493 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
494 VMM_CTR0(vm, vcpu, "SVM:NPF for code access.");
495 return (0);
496 }
474 if (exitinfo1 & VMCB_NPF_INFO1_W)
475 *type = VM_PROT_WRITE;
476 else
477 *type = VM_PROT_READ;
497
478
498 if (exitinfo1 & VMCB_NPF_INFO1_RSV) {
499 VMM_CTR0(vm, vcpu, "SVM:NPF reserved bits are set.");
500 return (0);
501 }
479 /* XXX: protection is not used. */
480 *prot = 0;
481}
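
/*
 * Illustrative usage, not part of either revision: classifying a nested
 * page fault with svm_npf_paging() above.  A fault with VMCB_NPF_INFO1_W
 * set in EXITINFO1 yields VM_PROT_WRITE, anything else VM_PROT_READ.
 */
static int
svm_npf_type_example(uint64_t exitinfo1)
{
	int type, prot;

	svm_npf_paging(exitinfo1, &type, &prot);
	return (type);
}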
482
483static bool
484svm_npf_emul_fault(uint64_t exitinfo1)
485{
502
486
503 if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
504 VMM_CTR0(vm, vcpu, "SVM:NPF during guest page table walk.");
505 return (0);
487 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
488 return (false);
506 }
507
489 }
490
508 /*
509 * nRIP is NULL for NPF so we don't have the length of instruction,
510 * we rely on instruction decode s/w to determine the size of
511 * instruction.
512 *
513 * XXX: DecodeAssist can use instruction from buffer.
514 */
515 if (vmm_fetch_instruction(vm, vcpu, rip, VIE_INST_SIZE,
516 cr3, vie) != 0) {
517 ERR("SVM:NPF instruction fetch failed, RIP:0x%lx\n", rip);
518 return (EINVAL);
491 if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
492 return (false);
519 }
520
493 }
494
521 KASSERT(vie->num_valid, ("No instruction to emulate."));
522 /*
523 * SVM doesn't provide GLA unlike Intel VM-x. VIE_INVALID_GLA
524 * which is a non-canonical address, indicates that GLA is not
525 * available to instruction emulation.
526 *
527 * XXX: Which SVM capability can provided GLA?
528 */
529 if(vmm_decode_instruction(vm, vcpu, VIE_INVALID_GLA, vie)) {
530 ERR("SVM: Couldn't decode instruction.\n");
531 return (0);
495 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
496 return (false);
532 }
533
497 }
498
534 /*
535 * XXX: Decoding for user space(IOAPIC) should be done in
536 * user space.
537 */
538 if (gpa < DEFAULT_APIC_BASE || gpa >= (DEFAULT_APIC_BASE + PAGE_SIZE)) {
539 VMM_CTR2(vm, vcpu, "SVM:NPF GPA(0x%lx) outside of local APIC"
540 " range(0x%x)\n", gpa, DEFAULT_APIC_BASE);
541 return (0);
542 }
543
544 err = vmm_emulate_instruction(vm, vcpu, gpa, vie, lapic_mmio_read,
545 lapic_mmio_write, 0);
546
547 return (err ? 0 : 1);
499 return (true);
548}
549
550/*
551 * Special handling of EFER MSR.
552 * SVM guest must have SVM EFER bit set, prohibit guest from clearing SVM
553 * enable bit in EFER.
554 */
555static void

--- 10 unchanged lines hidden ---

566 ((uint32_t)state->rax) | EFER_SVM;
567 } else {
568 state->rax = (uint32_t)state->efer;
569 swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32);
570 }
571}
572
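/*
 * Illustrative sketch, not part of either revision: the invariant the
 * comment above describes for the write path of svm_efer() -- the edx:eax
 * value a guest writes to EFER always has EFER_SVM forced on before it is
 * stored, so the guest can never observe SVM as disabled.
 */
static uint64_t
svm_efer_wval_example(uint32_t eax, uint32_t edx)
{
	return ((((uint64_t)edx << 32) | eax) | EFER_SVM);
}
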
573/*
574 * Determine the cause of virtual cpu exit and return to user space if exit
575 * demand so.
576 * Return: 1 - Return to user space.
577 * 0 - Continue vcpu run.
526 * Determine the cause of virtual cpu exit and handle VMEXIT.
527 * Return: false - Break vcpu execution loop and handle vmexit
528 * in kernel or user space.
529 * true - Continue vcpu run.
578 */
579static int
531static bool
580svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
581{
582 struct vmcb_state *state;
583 struct vmcb_ctrl *ctrl;
584 struct svm_regctx *ctx;
585 uint64_t code, info1, info2, val;
586 uint32_t eax, ecx, edx;
587 int user; /* Flag for user mode */
588 int update_rip; /* Flag for updating RIP */
589 int inst_len;
539 bool update_rip, loop;
590
591 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
592
593 state = svm_get_vmcb_state(svm_sc, vcpu);
594 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
595 ctx = svm_get_guest_regctx(svm_sc, vcpu);
596 update_rip = 1;
597 user = 0;
598
599 vmexit->exitcode = VM_EXITCODE_VMX;
600 vmexit->u.vmx.error = 0;
601 code = ctrl->exitcode;
602 info1 = ctrl->exitinfo1;
603 info2 = ctrl->exitinfo2;
604
605 if (ctrl->nrip) {
606 inst_len = ctrl->nrip - state->rip;
607 } else {
608 inst_len = ctrl->inst_decode_size;
609 }
550 update_rip = true;
551 loop = true;
552 vmexit->exitcode = VM_EXITCODE_VMX;
553 vmexit->u.vmx.error = 0;
610
611 switch (code) {
612 case VMCB_EXIT_MC: /* Machine Check. */
613 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
614 vmexit->exitcode = VM_EXITCODE_MTRAP;
615 user = 1;
559 loop = false;
616 break;
617
618 case VMCB_EXIT_MSR: /* MSR access. */
619 eax = state->rax;
620 ecx = ctx->sctx_rcx;
621 edx = ctx->e.g.sctx_rdx;
622
623 if (ecx == MSR_EFER) {
624 VMM_CTR0(svm_sc->vm, vcpu,"VMEXIT EFER\n");
625 svm_efer(svm_sc, vcpu, info1);
626 break;
627 }
628
629 if (info1) {
630 /* VM exited because of write MSR */
631 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
575 vmm_stat_incr(svm_sc->vm, vcpu,
576 VMEXIT_WRMSR, 1);
632 vmexit->exitcode = VM_EXITCODE_WRMSR;
633 vmexit->u.msr.code = ecx;
634 val = (uint64_t)edx << 32 | eax;
635 if (emulate_wrmsr(svm_sc->vm, vcpu, ecx, val)) {
636 vmexit->u.msr.wval = val;
637 user = 1;
582 loop = false;
638 }
639 VMM_CTR3(svm_sc->vm, vcpu,
640 "VMEXIT WRMSR(%s handling) 0x%lx @0x%x",
641 user ? "user" : "kernel", val, ecx);
586 loop ? "kernel" : "user", val, ecx);
642 } else {
643 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
588 vmm_stat_incr(svm_sc->vm, vcpu,
589 VMEXIT_RDMSR, 1);
644 vmexit->exitcode = VM_EXITCODE_RDMSR;
645 vmexit->u.msr.code = ecx;
646 if (emulate_rdmsr(svm_sc->vm, vcpu, ecx)) {
647 user = 1;
593 loop = false;
648 }
649 VMM_CTR3(svm_sc->vm, vcpu, "SVM:VMEXIT RDMSR"
650 " 0x%lx,%lx @0x%x", ctx->e.g.sctx_rdx,
651 state->rax, ecx);
596 " MSB=0x%08x, LSB=%08x @0x%x",
597 ctx->e.g.sctx_rdx, state->rax, ecx);
652 }
653
654#define MSR_AMDK8_IPM 0xc0010055
655 /*
656 * We can't hide the AMD C1E idle capability since it's
657 * based on CPU generation; for now, ignore access to
658 * this MSR by vcpus.
659 * XXX: special handling of AMD C1E - Ignore.
660 */
661 if (ecx == MSR_AMDK8_IPM)
662 user = 0;
608 loop = true;
663 break;
664
665 case VMCB_EXIT_INTR:
666 /*
667 * Exit on External Interrupt.
668 * Let the host interrupt handler run; if it is a guest
669 * interrupt, the local APIC will inject the event into the guest.
670 */
671 user = 0;
672 update_rip = 0;
617 update_rip = false;
673 VMM_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt"
674 " RIP:0x%lx.\n", state->rip);
675 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
676 break;
677
678 case VMCB_EXIT_IO:
679 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
680 user = svm_handle_io(svm_sc, vcpu, vmexit);
681 VMM_CTR1(svm_sc->vm, vcpu, "SVM:I/O VMEXIT RIP:0x%lx\n",
682 state->rip);
625 loop = svm_handle_io(svm_sc, vcpu, vmexit);
626 update_rip = true;
683 break;
684
685 case VMCB_EXIT_CPUID:
686 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
687 (void)x86_emulate_cpuid(svm_sc->vm, vcpu,
688 (uint32_t *)&state->rax,
689 (uint32_t *)&ctx->sctx_rbx,
690 (uint32_t *)&ctx->sctx_rcx,
691 (uint32_t *)&ctx->e.g.sctx_rdx);
692 VMM_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT CPUID\n");
693 user = 0;
694 break;
695
696 case VMCB_EXIT_HLT:
697 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
698 if (ctrl->v_irq) {
699 /* Interrupt is pending, can't halt guest. */
700 user = 0;
701 vmm_stat_incr(svm_sc->vm, vcpu,
702 VMEXIT_HLT_IGNORED, 1);
703 VMM_CTR0(svm_sc->vm, vcpu,
704 "VMEXIT halt ignored.");
705 } else {
706 VMM_CTR0(svm_sc->vm, vcpu,
707 "VMEXIT halted CPU.");
708 vmexit->exitcode = VM_EXITCODE_HLT;
709 user = 1;
651 loop = false;
710
711 }
712 break;
713
714 case VMCB_EXIT_PAUSE:
715 VMM_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT pause");
716 vmexit->exitcode = VM_EXITCODE_PAUSE;
717 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
718
719 break;
720
721 case VMCB_EXIT_NPF:
664 loop = false;
665 update_rip = false;
666 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EPT_FAULT, 1);
667
668 if (info1 & VMCB_NPF_INFO1_RSV) {
669 VMM_CTR2(svm_sc->vm, vcpu, "SVM_ERR:NPT"
670 " reserved bit is set,"
671 "INFO1:0x%lx INFO2:0x%lx .\n",
672 info1, info2);
673 break;
674 }
675
722 /* EXITINFO2 has the physical fault address (GPA). */
723 if (!svm_handle_npf(svm_sc->vm, vcpu, info2,
724 state->rip, info1, state->cr3,
725 &vmexit->u.paging.vie)) {
726 /* I/O APIC for MSI/X. */
677 if(vm_mem_allocated(svm_sc->vm, info2)) {
678 VMM_CTR3(svm_sc->vm, vcpu, "SVM:NPF-paging,"
679 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n",
680 state->rip, info1, info2);
727 vmexit->exitcode = VM_EXITCODE_PAGING;
728 user = 1;
729 vmexit->u.paging.gpa = info2;
730 } else {
731 /* Local APIC NPF */
732 update_rip = 1;
733 vmm_stat_incr(svm_sc->vm, vcpu,
734 VMEXIT_NPF_LAPIC, 1);
683 svm_npf_paging(info1, &vmexit->u.paging.fault_type,
684 &vmexit->u.paging.protection);
685 } else if (svm_npf_emul_fault(info1)) {
686 VMM_CTR3(svm_sc->vm, vcpu, "SVM:NPF-inst_emul,"
687 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n",
688 state->rip, info1, info2);
689 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
690 vmexit->u.inst_emul.gpa = info2;
691 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
692 vmexit->u.inst_emul.cr3 = state->cr3;
693 vmexit->inst_length = VIE_INST_SIZE;
735 }
736
737 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EPT_FAULT, 1);
738 inst_len = vmexit->u.paging.vie.num_processed;
739 VMM_CTR3(svm_sc->vm, vcpu, "VMEXIT NPF, GPA:0x%lx "
740 "user=%d instr len=%d.\n", info2, user,
741 inst_len);
742 break;
743
744 case VMCB_EXIT_SHUTDOWN:
696 break;
697
698 case VMCB_EXIT_SHUTDOWN:
745 VMM_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT guest shutdown.");
746 user = 1;
747 vmexit->exitcode = VM_EXITCODE_VMX;
699 VMM_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown.");
700 loop = false;
748 break;
749
750 case VMCB_EXIT_INVALID:
751 VMM_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID.");
701 break;
702
703 case VMCB_EXIT_INVALID:
704 VMM_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT INVALID.");
752 user = 1;
753 vmexit->exitcode = VM_EXITCODE_VMX;
705 loop = false;
754 break;
755
756 default:
757 /* Return to user space. */
706 break;
707
708 default:
709 /* Return to user space. */
758 user = 1;
759 update_rip = 0;
710 loop = false;
711 update_rip = false;
760 VMM_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx"
761 " EXITINFO1: 0x%lx EXITINFO2:0x%lx\n",
762 ctrl->exitcode, info1, info2);
763 VMM_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx"
764 " Inst decoder len:%d\n", state->rip,
765 ctrl->nrip, ctrl->inst_decode_size);
766 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
767 break;
768 }
769
770 if (ctrl->v_irq) {
771 VMM_CTR2(svm_sc->vm, vcpu, "SVM:SVM intr pending vector:0x%x"
772 " priority:0x%x", ctrl->v_intr_vector, ctrl->v_intr_prio);
773 }
774
775 vmexit->rip = state->rip;
776 if (update_rip) {
777 vmexit->rip += inst_len;
724 if (ctrl->nrip == 0) {
725 VMM_CTR1(svm_sc->vm, vcpu, "SVM_ERR:nRIP is not set "
726 "for RIP0x%lx.\n", state->rip);
727 vmexit->exitcode = VM_EXITCODE_VMX;
728 } else
729 vmexit->rip = ctrl->nrip;
778 }
779
780 /* Return to userland for APs to start. */
781 if (vmexit->exitcode == VM_EXITCODE_SPINUP_AP) {
782 VMM_CTR1(svm_sc->vm, vcpu, "SVM:Starting APs, RIP0x%lx.\n",
783 vmexit->rip);
784 user = 1;
732 /* If vcpu execution is continued, update RIP. */
733 if (loop) {
734 state->rip = vmexit->rip;
785 }
786
787 /* XXX: Set next RIP before restarting virtual cpus. */
788 if (ctrl->nrip == 0) {
789 ctrl->nrip = state->rip;
737 if (state->rip == 0) {
738 VMM_CTR0(svm_sc->vm, vcpu, "SVM_ERR:RIP is NULL\n");
739 vmexit->exitcode = VM_EXITCODE_VMX;
790 }
791
792 return (user);
742 return (loop);
793}
794
795/*
796 * Inject NMI to virtual cpu.
797 */
798static int
799svm_inject_nmi(struct svm_softc *svm_sc, int vcpu)
800{
801 struct vmcb_ctrl *ctrl;
802
803 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
804
805 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
806 /* Can't inject another NMI if last one is pending.*/
807 if (!vm_nmi_pending(svm_sc->vm, vcpu))
808 return (0);
809
810 /* Inject NMI, vector number is not used.*/
811 if (vmcb_eventinject(ctrl, VM_NMI, IDT_NMI, 0, FALSE)) {
761 if (vmcb_eventinject(ctrl, VM_NMI, IDT_NMI, 0, false)) {
812 VMM_CTR0(svm_sc->vm, vcpu, "SVM:NMI injection failed.\n");
813 return (EIO);
814 }
815
816 /* Acknowledge the request is accepted.*/
817 vm_nmi_clear(svm_sc->vm, vcpu);
818
819 VMM_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n");

--- 21 unchanged lines hidden ---

841 VMM_CTR1(svm_sc->vm, vcpu,
842 "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj);
843 return ;
844 }
845
846 /* Wait for guest to come out of interrupt shadow. */
847 if (ctrl->intr_shadow) {
848 VMM_CTR0(svm_sc->vm, vcpu, "SVM:Guest in interrupt shadow.\n");
849 goto inject_failed;
799 return;
850 }
851
852 /* Make sure no interrupt is pending.*/
853 if (ctrl->v_irq) {
854 VMM_CTR0(svm_sc->vm, vcpu,
855 "SVM:virtual interrupt is pending.\n");
856 goto inject_failed;
857 }
858
859 /* NMI event has priority over interrupts.*/
860 if (svm_inject_nmi(svm_sc, vcpu)) {
861 return;
862 }
863
864 vector = lapic_pending_intr(svm_sc->vm, vcpu);
865 if (vector < 0) {
866 return;
867 }
868
808
809 /* No interrupt is pending. */
810 if (vector < 0)
811 return;
812
869 if (vector < 32 || vector > 255) {
870 ERR("Invalid vector number:%d\n", vector);
814 VMM_CTR1(svm_sc->vm, vcpu, "SVM_ERR:Event injection"
815 "invalid vector=%d.\n", vector);
816 ERR("SVM_ERR:Event injection invalid vector=%d.\n", vector);
871 return;
872 }
873
874 if ((state->rflags & PSL_I) == 0) {
875 VMM_CTR0(svm_sc->vm, vcpu, "SVM:Interrupt is disabled\n");
876 goto inject_failed;
822 return;
877 }
878
879 if(vmcb_eventinject(ctrl, VM_HW_INTR, vector, 0, FALSE)) {
880 VMM_CTR2(svm_sc->vm, vcpu, "SVM:Event injection failed to"
881 " VCPU%d,vector=%d.\n", vcpu, vector);
825 if (vmcb_eventinject(ctrl, VM_HW_INTR, vector, 0, false)) {
826 VMM_CTR1(svm_sc->vm, vcpu, "SVM:Event injection failed to"
827 " vector=%d.\n", vector);
882 return;
883 }
884
885 /* Acknowledge that event is accepted.*/
886 lapic_intr_accepted(svm_sc->vm, vcpu, vector);
887 VMM_CTR1(svm_sc->vm, vcpu, "SVM:event injected,vector=%d.\n", vector);
888
889inject_failed:
890 return;
891}
892
893/*
894 * Restore host Task Register selector type after every vcpu exit.
895 */
896static void
897setup_tss_type(void)
898{

--- 4 unchanged lines hidden ---

903 /*
904 * Task selector that should be restored in host is
905 * 64-bit available(9), not what is read(0xb), see
906 * APMvol2 Rev3.21 4.8.3 System Descriptors table.
907 */
908 desc->sd_type = 9;
909}
910
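/*
 * Illustrative note, not part of either revision: the two 64-bit TSS
 * descriptor type codes involved above.  LTR accepts only an "available"
 * TSS and marks it busy, so after the VMRUN/#VMEXIT round trip the type
 * field reads back as 0xb and is reset to 9, presumably so a later TR
 * reload cannot fault on a busy descriptor.
 */
#define	EXAMPLE_SDT_SYSTSS	9	/* 64-bit TSS, available */
#define	EXAMPLE_SDT_SYSBSY	0xb	/* 64-bit TSS, busy */
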
854static void
855svm_handle_exitintinfo(struct svm_softc *svm_sc, int vcpu)
856{
857 struct vmcb_ctrl *ctrl;
858 uint64_t intinfo;
859
860 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
861
862 /*
863 * VMEXIT while delivering an exception or interrupt.
864 * Inject it as virtual interrupt.
865 * Section 15.7.2 Intercepts during IDT interrupt delivery.
866 */
867 intinfo = ctrl->exitintinfo;
868
869 if (intinfo & VMCB_EXITINTINFO_VALID) {
870 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
871 VMM_CTR1(svm_sc->vm, vcpu, "SVM:EXITINTINFO:0x%lx is valid\n",
872 intinfo);
873 if (vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo),
874 VMCB_EXITINTINFO_VECTOR(intinfo),
875 VMCB_EXITINTINFO_EC(intinfo),
876 VMCB_EXITINTINFO_EC_VALID & intinfo)) {
877 VMM_CTR1(svm_sc->vm, vcpu, "SVM:couldn't inject pending"
878 " interrupt, exitintinfo:0x%lx\n", intinfo);
879 }
880 }
881}
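
/*
 * Illustrative sketch, not part of either revision: pulling apart an
 * EXITINTINFO value with the vmcb.h accessors used above; field layout is
 * per AMD APM vol. 2, sec. 15.7.2.
 */
static void
svm_exitintinfo_example(uint64_t intinfo)
{
	if ((intinfo & VMCB_EXITINTINFO_VALID) == 0)
		return;		/* no event was being delivered */
	printf("vector=%d type=%d ec_valid=%d ec=%d\n",
	    (int)VMCB_EXITINTINFO_VECTOR(intinfo),
	    (int)VMCB_EXITINTINFO_TYPE(intinfo),
	    (intinfo & VMCB_EXITINTINFO_EC_VALID) != 0,
	    (int)VMCB_EXITINTINFO_EC(intinfo));
}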
911/*
912 * Start vcpu with specified RIP.
913 */
914static int
915svm_vmrun(void *arg, int vcpu, register_t rip)
886svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap)
916{
917 struct svm_regctx *hctx, *gctx;
918 struct svm_softc *svm_sc;
919 struct svm_vcpu *vcpustate;
920 struct vmcb_state *state;
921 struct vmcb_ctrl *ctrl;
922 struct vm_exit *vmexit;
923 int user;
924 uint64_t vmcb_pa;
925 static uint64_t host_cr2;
896 bool loop; /* Continue vcpu execution loop. */
926
897
927 user = 0;
898 loop = true;
928 svm_sc = arg;
929
930 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
931
932 vcpustate = svm_get_vcpu(svm_sc, vcpu);
933 state = svm_get_vmcb_state(svm_sc, vcpu);
934 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
935 vmexit = vm_exitinfo(svm_sc->vm , vcpu);
936 if (vmexit->exitcode == VM_EXITCODE_VMX) {
937 ERR("vcpu%d shouldn't run again.\n", vcpu);
938 return(EIO);
939 }
904 vmexit = vm_exitinfo(svm_sc->vm, vcpu);
940
941 gctx = svm_get_guest_regctx(svm_sc, vcpu);
942 hctx = &host_ctx[curcpu];
943 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
944
945 if (vcpustate->lastcpu != curcpu) {
946 /* Virtual CPU is running on a different CPU now. */
947 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_MIGRATIONS, 1);

--- 21 unchanged lines hidden ---

969 * ASID is unique for a guest.
970 * IOPM is unchanged.
971 * RVI/EPT is unchanged.
972 *
973 */
974 ctrl->vmcb_clean = VMCB_CACHE_ASID |
975 VMCB_CACHE_IOPM |
976 VMCB_CACHE_NP;
977
978 }
979
980 vcpustate->lastcpu = curcpu;
981
945 VMM_CTR3(svm_sc->vm, vcpu, "SVM:Enter vmrun old RIP:0x%lx"
946 " new RIP:0x%lx inst len=%d\n",
947 state->rip, rip, vmexit->inst_length);
982 /* Update Guest RIP */
983 state->rip = rip;
984
985 VMM_CTR1(svm_sc->vm, vcpu, "SVM:entered with RIP:0x%lx\n",
986 state->rip);
987 do {
952 vmexit->inst_length = 0;
988 /* We are asked to give the cpu by scheduler. */
989 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
990 vmexit->exitcode = VM_EXITCODE_BOGUS;
991 vmexit->inst_length = 0;
992 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_ASTPENDING, 1);
993 VMM_CTR1(svm_sc->vm, vcpu, "SVM:gave up cpu, RIP:0x%lx\n",
994 state->rip);
957 VMM_CTR1(svm_sc->vm, vcpu,
958 "SVM: gave up CPU, RIP:0x%lx\n", state->rip);
959 vmexit->rip = state->rip;
995 break;
996 }
997
998 lapic_timer_tick(svm_sc->vm, vcpu);
999
1000 (void)svm_set_vmcb(svm_get_vmcb(svm_sc, vcpu), svm_sc->asid);
1001
967 svm_handle_exitintinfo(svm_sc, vcpu);
968
1002 (void)svm_inj_interrupts(svm_sc, vcpu);
1003
1004 /* Change TSS type to available.*/
1005 setup_tss_type();
1006
1007 /*
1008 * Disable global interrupt to guarantee atomicity
1009 * during loading of guest state.
1010 * See 15.5.1 "Loading guest state" APM2.
1011 */
1012 disable_gintr();
1013
1014 save_cr2(&host_cr2);
1015 load_cr2(&state->cr2);
1016
1017 /* Launch Virtual Machine. */
1018 svm_launch(vmcb_pa, gctx, hctx);
1019
1020 save_cr2(&state->cr2);
1021 load_cr2(&host_cr2);
1022
1023 /*
1024 * Only the host's GDTR and IDTR are saved and restored by SVM;
1025 * LDTR and TR need to be restored by the VMM.
1026 * XXX: kernel doesn't use LDT, only user space.
1027 */

--- 12 unchanged lines hidden ---

1040 */
1041 wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);
1042 wrmsr(MSR_KGSBASE, (uint64_t)&__pcpu[vcpustate->lastcpu]);
1043
1044 /* The vcpu exited with global interrupts disabled. */
1045 enable_gintr();
1046
1047 /* Handle #VMEXIT and if required return to user space. */
1048 user = svm_vmexit(svm_sc, vcpu, vmexit);
1016 loop = svm_vmexit(svm_sc, vcpu, vmexit);
1049 vcpustate->loop++;
1050 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
1051
1052 /* Update RIP since we are continuing vcpu execution.*/
1053 state->rip = vmexit->rip;
1054
1055 VMM_CTR1(svm_sc->vm, vcpu, "SVM:loop RIP:0x%lx\n", state->rip);
1056 } while (!user);
1057 VMM_CTR1(svm_sc->vm, vcpu, "SVM:exited with RIP:0x%lx\n",
1058 state->rip);
1020 } while (loop);
1059
1060 return (0);
1061}
1062
1063/*
1064 * Cleanup for virtual machine.
1065 */
1066static void
1067svm_vmcleanup(void *arg)
1068{
1069 struct svm_softc *svm_sc;
1070
1071 svm_sc = arg;
1072
1073 VMM_CTR0(svm_sc->vm, 0, "SVM:cleanup\n");
1074
1075 svm_npt_cleanup(svm_sc);
1076 free(svm_sc, M_SVM);
1077}
1078
1079/*
1080 * Return pointer to hypervisor saved register state.
1081 */
1082static register_t *
1083swctx_regptr(struct svm_regctx *regctx, int reg)

--- 24 unchanged lines hidden ---

1108 return (&regctx->sctx_r12);
1109 case VM_REG_GUEST_R13:
1110 return (&regctx->sctx_r13);
1111 case VM_REG_GUEST_R14:
1112 return (&regctx->sctx_r14);
1113 case VM_REG_GUEST_R15:
1114 return (&regctx->sctx_r15);
1115 default:
1116 ERR("Unknown register requested.\n");
1077 ERR("Unknown register requested, reg=%d.\n", reg);
1117 break;
1118 }
1119
1120 return (NULL);
1121}
1122
1123/*
1124 * Interface to read guest registers.

--- 11 unchanged lines hidden ---

1136
1137 vmcb = svm_get_vmcb(svm_sc, vcpu);
1138
1139 if (vmcb_read(vmcb, ident, val) == 0) {
1140 return (0);
1141 }
1142
1143 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
1105
1144 if (reg != NULL) {
1145 *val = *reg;
1146 return (0);
1147 }
1148
1149 ERR("reg type %x is not saved n VMCB\n", ident);
1111 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
1150 return (EINVAL);
1151}
1152
1153/*
1154 * Interface to write to guest registers.
1155 * This can be SVM h/w saved or hypervisor saved register.
1156 */
1157static int

--- 13 unchanged lines hidden ---

1171
1172 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
1173
1174 if (reg != NULL) {
1175 *reg = val;
1176 return (0);
1177 }
1178
1179 ERR("reg type %x is not saved n VMCB\n", ident);
1141 ERR("SVM_ERR:reg type %x is not saved in VMCB.\n", ident);
1180 return (EINVAL);
1181}
1182
1183
1184/*
1185 * Interface to set various descriptors.
1186 */
1187static int

--- 8 unchanged lines hidden ---

1196 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
1197
1198 vmcb = svm_get_vmcb(svm_sc, vcpu);
1199
1200 VMM_CTR1(svm_sc->vm, vcpu, "SVM:set_desc: Type%d\n", type);
1201
1202 seg = vmcb_seg(vmcb, type);
1203 if (seg == NULL) {
1204 ERR("Unsupported seg type %d\n", type);
1166 ERR("SVM_ERR:Unsupported segment type%d\n", type);
1205 return (EINVAL);
1206 }
1207
1208 /* Map seg_desc access to VMCB attribute format.*/
1209 attrib = ((desc->access & 0xF000) >> 4) | (desc->access & 0xFF);
1210 VMM_CTR3(svm_sc->vm, vcpu, "SVM:[sel %d attribute 0x%x limit:0x%x]\n",
1211 type, desc->access, desc->limit);
1212 seg->attrib = attrib;

--- 14 unchanged lines hidden ---

1227
1228 svm_sc = arg;
1229 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
1230
1231 VMM_CTR1(svm_sc->vm, vcpu, "SVM:get_desc: Type%d\n", type);
1232
1233 seg = vmcb_seg(svm_get_vmcb(svm_sc, vcpu), type);
1234 if (!seg) {
1235 ERR("Unsupported seg type %d\n", type);
1197 ERR("SVM_ERR:Unsupported segment type%d\n", type);
1236 return (EINVAL);
1237 }
1238
1239 /* Map seg_desc access to VMCB attribute format.*/
1240 desc->access = ((seg->attrib & 0xF00) << 4) | (seg->attrib & 0xFF);
1241 desc->base = seg->base;
1242 desc->limit = seg->limit;
1243
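/*
 * Worked example, not part of either revision: the access<->attrib packing
 * used by the set_desc/get_desc paths above.  The VMCB attribute keeps the
 * type/S/DPL/P byte plus the AVL/L/DB/G nibble, so an access word of
 * 0xA09B (present 64-bit code segment) packs to 0xA9B and unpacks back to
 * 0xA09B; bits 8-11 of the access word are not preserved.
 */
static uint16_t
seg_access_to_attrib_example(uint16_t access)
{
	return (((access & 0xF000) >> 4) | (access & 0xFF));
}
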

--- 117 unchanged lines hidden ---

1361}
1362
1363struct vmm_ops vmm_ops_amd = {
1364 svm_init,
1365 svm_cleanup,
1366 svm_vminit,
1367 svm_vmrun,
1368 svm_vmcleanup,
1369 svm_npt_vmmap_set,
1370 svm_npt_vmmap_get,
1371 svm_getreg,
1372 svm_setreg,
1373 svm_getdesc,
1374 svm_setdesc,
1375 svm_inject_event,
1376 svm_getcap,
1377 svm_setcap
1337 svm_setcap,
1338 svm_npt_alloc,
1339 svm_npt_free
1378};