svm.c (270511, deleted lines) → svm.c (270962, added lines)
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 270511 2014-08-25 00:58:20Z neel $");
28__FBSDID("$FreeBSD: projects/bhyve_svm/sys/amd64/vmm/amd/svm.c 270962 2014-09-02 04:22:42Z neel $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>

--- 75 unchanged lines hidden ---

112 */
113static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
114
115/*
116 * S/w saved host context.
117 */
118static struct svm_regctx host_ctx[MAXCPU];
119
120static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid EXITINTINFO");
120static VMM_STAT_AMD(VCPU_EXITINTINFO, "Valid VMCB EXITINTINFO");
121static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "VMM pending exception injected");
121
122/*
123 * Common function to enable or disable SVM for a CPU.
124 */
125static int
126cpu_svm_enable_disable(boolean_t enable)
127{
128 uint64_t efer_msr;

--- 352 unchanged lines hidden ---

481 * From APMv2:
482 * "Retrieve the CPL from the CPL field in the VMCB, not
483 * from any segment DPL"
484 */
485 return (state->cpl);
486}
487
488static enum vm_cpu_mode
489 svm_vcpu_mode(uint64_t efer)
490 {
491
492 if (efer & EFER_LMA)
493 return (CPU_MODE_64BIT);
494 else
495 return (CPU_MODE_COMPATIBILITY);
496 }
490 svm_vcpu_mode(struct vmcb *vmcb)
491 {
492 struct vmcb_segment *seg;
493 struct vmcb_state *state;
494
495 state = &vmcb->state;
496
497 if (state->efer & EFER_LMA) {
498 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
499 /*
500 * Section 4.8.1 of APM2: check whether the code segment
501 * has the Long attribute set in its descriptor.
502 */
503 if (seg->attrib & VMCB_CS_ATTRIB_L)
504 return (CPU_MODE_64BIT);
505 else
506 return (CPU_MODE_COMPATIBILITY);
507 } else if (state->cr0 & CR0_PE) {
508 return (CPU_MODE_PROTECTED);
509 } else {
510 return (CPU_MODE_REAL);
511 }
512 }
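Note on the new logic: EFER.LMA alone cannot tell 64-bit code from compatibility-mode code, since both run with LMA set; the distinction is the L attribute of CS, and with LMA clear the CR0.PE bit separates protected from real mode. Below is a minimal standalone sketch of the same decision; the mask values are illustrative stand-ins for the EFER_LMA, CR0_PE and VMCB_CS_ATTRIB_L definitions in the kernel headers.

#include <stdint.h>

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,
	CPU_MODE_64BIT
};

#define	EFER_LMA	(1UL << 10)	/* long mode active */
#define	CR0_PE		(1UL << 0)	/* protected mode enable */
#define	CS_ATTRIB_L	(1 << 9)	/* long-mode code segment */

/*
 * EFER.LMA  CS.L  CR0.PE  ->  guest mode
 *     1      1      x         64-bit
 *     1      0      x         compatibility
 *     0      x      1         protected
 *     0      x      0         real
 */
static enum vm_cpu_mode
guest_cpu_mode(uint64_t efer, uint16_t cs_attrib, uint64_t cr0)
{
	if (efer & EFER_LMA)
		return ((cs_attrib & CS_ATTRIB_L) ?
		    CPU_MODE_64BIT : CPU_MODE_COMPATIBILITY);
	return ((cr0 & CR0_PE) ? CPU_MODE_PROTECTED : CPU_MODE_REAL);
}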
497
498static enum vm_paging_mode
499svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
500{
501
502 if ((cr0 & CR0_PG) == 0)
503 return (PAGING_MODE_FLAT);

--- 60 unchanged lines hidden ---

564 case 4:
565 return (8); /* 64 bit */
566 default:
567 panic("%s: invalid size encoding %d", __func__, size);
568 }
569}
570
571static void
572 svm_paging_info(struct vmcb_state *state, struct vm_guest_paging *paging)
573 {
574
575 paging->cr3 = state->cr3;
576 paging->cpl = svm_cpl(state);
577 paging->cpu_mode = svm_vcpu_mode(state->efer);
578 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
579 state->efer);
580 }
581
588 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
589 {
590 struct vmcb_state *state;
591
592 state = &vmcb->state;
593 paging->cr3 = state->cr3;
594 paging->cpl = svm_cpl(state);
595 paging->cpu_mode = svm_vcpu_mode(vmcb);
596 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
597 state->efer);
598 }
599
600
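The signature widens from vmcb_state to the whole vmcb because svm_vcpu_mode() now needs the CS segment attributes, which live outside vmcb_state. Callers simply hand in the VMCB, as the I/O intercept below does; a sketch of the call pattern:

	struct vm_guest_paging paging;

	/* Collect cr3, cpl, cpu_mode and paging_mode in one call. */
	svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &paging);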
582/*
583 * Handle guest I/O intercept.
584 */
585static bool
586svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
587{
588 struct vmcb_ctrl *ctrl;
589 struct vmcb_state *state;

--- 12 unchanged lines hidden ---

602 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
603 vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
604 vmexit->u.inout.port = (uint16_t)(info1 >> 16);
605 vmexit->u.inout.eax = (uint32_t)(state->rax);
606
607 if (vmexit->u.inout.string) {
608 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
609 vis = &vmexit->u.inout_str;
610 svm_paging_info(state, &vis->paging);
629 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
611 vis->rflags = state->rflags;
612 vis->cr0 = state->cr0;
613 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
614 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
615 vis->addrsize = svm_inout_str_addrsize(info1);
616 svm_inout_str_seginfo(svm_sc, vcpu, info1,
617 vmexit->u.inout.in, vis);
618 }
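For reference, the inout decode above follows the IOIO intercept's EXITINFO1 layout from APMv2. A hedged decoder sketch follows; note that SZ8/SZ16/SZ32 occupy bits 4-6 one-hot, which is why (info1 >> 4) & 0x7 yields the byte count directly.

#include <stdint.h>

struct ioio_info {
	int		in;	/* 1 = IN, 0 = OUT */
	int		string;	/* INS/OUTS */
	int		rep;	/* REP-prefixed */
	int		bytes;	/* 1, 2 or 4 */
	uint16_t	port;
};

/* Decode EXITINFO1 of a VMCB_EXIT_IO intercept (layout per APMv2). */
static void
ioio_decode(uint64_t info1, struct ioio_info *io)
{
	io->in = (info1 & 0x1) ? 1 : 0;
	io->string = (info1 & 0x4) ? 1 : 0;
	io->rep = (info1 & 0x8) ? 1 : 0;
	/* SZ8/SZ16/SZ32 are one-hot in bits 4-6: field value is the count. */
	io->bytes = (info1 >> 4) & 0x7;
	io->port = (uint16_t)(info1 >> 16);
}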

--- 25 unchanged lines hidden ---

644
645 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
646 return (false);
647 }
648
649 return (true);
650}
651
671static void
672svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
673{
674 struct vm_guest_paging *paging;
675 struct vmcb_segment *seg;
676
677 paging = &vmexit->u.inst_emul.paging;
678 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
679 vmexit->u.inst_emul.gpa = gpa;
680 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
681 svm_paging_info(vmcb, paging);
682
683 /*
684 * If DecodeAssist SVM feature doesn't exist, we don't have NPF
685 * instruction length. RIP will be calculated based on the length
686 * determined by instruction emulation.
687 */
688 vmexit->inst_length = VIE_INST_SIZE;
689
690 seg = vmcb_seg(vmcb, VM_REG_GUEST_CS);
691 switch(paging->cpu_mode) {
692 case CPU_MODE_PROTECTED:
693 case CPU_MODE_COMPATIBILITY:
694 /*
695 * Section 4.8.1 of APM2, Default Operand Size or D bit.
696 */
697 vmexit->u.inst_emul.cs_d = (seg->attrib & VMCB_CS_ATTRIB_D) ?
698 1 : 0;
699 break;
700 default:
701 vmexit->u.inst_emul.cs_d = 0;
702 break;
703 }
704}
705
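The cs_d flag feeds the instruction decoder's default operand size: only legacy protected and compatibility modes take it from CS.D, while 64-bit mode defaults to 32-bit operands (REX.W widens to 64) and real mode to 16-bit. An illustrative helper, not part of this file, reusing enum vm_cpu_mode from the earlier sketch:

/*
 * Default operand size in bytes for the given mode, before any 0x66
 * operand-size prefix (which toggles 16 <-> 32) or REX.W is applied.
 */
static int
default_opsize(enum vm_cpu_mode mode, int cs_d)
{
	switch (mode) {
	case CPU_MODE_64BIT:
		return (4);		/* 32-bit default; REX.W gives 64 */
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		return (cs_d ? 4 : 2);	/* CS.D selects 32 vs 16 */
	default:
		return (2);		/* real mode defaults to 16-bit */
	}
}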
652/*
653 * Special handling of EFER MSR.
654 * SVM guest must have SVM EFER bit set; prohibit guest from clearing SVM
655 * enable bit in EFER.
656 */
657static void
658svm_efer(struct svm_softc *svm_sc, int vcpu, boolean_t write)
659{

--- 7 unchanged lines hidden ---

667 state->efer = ((swctx->e.g.sctx_rdx & (uint32_t)~0) << 32) |
668 ((uint32_t)state->rax) | EFER_SVM;
669 } else {
670 state->rax = (uint32_t)state->efer;
671 swctx->e.g.sctx_rdx = (uint32_t)(state->efer >> 32);
672 }
673}
674
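The read/write halves above follow the RDMSR/WRMSR convention of splitting a 64-bit MSR value across EDX:EAX; the write path additionally ORs in EFER_SVM (bit 12) so the guest can never clear its own SVM enable. A self-contained sketch of that reassembly:

#include <stdint.h>

#define	EFER_SVM	(1UL << 12)	/* SVM enable; must stay set */

/* WRMSR emulation: reassemble EDX:EAX and pin EFER.SVM on. */
static uint64_t
efer_wrmsr(uint32_t eax, uint32_t edx)
{
	return (((uint64_t)edx << 32) | eax | EFER_SVM);
}

/* RDMSR emulation: split EFER back into EDX:EAX. */
static void
efer_rdmsr(uint64_t efer, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)efer;
	*edx = (uint32_t)(efer >> 32);
}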
729static void
730svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
731{
732 struct vmcb_ctrl *ctrl;
733 uint64_t intinfo;
734
735 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
736 intinfo = ctrl->exitintinfo;
737 if (!VMCB_EXITINTINFO_VALID(intinfo))
738 return;
739
740 /*
741 * From APMv2, Section "Intercepts during IDT interrupt delivery"
742 *
743 * If a #VMEXIT happened during event delivery then record the event
744 * that was being delivered.
745 */
746 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
747 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
748 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
749 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
750}
751
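The VMCB_EXITINTINFO_* accessors used here decode the event-description format that EXITINTINFO shares with EVENTINJ. A hedged sketch of that layout (field positions per APMv2; the kernel's own macros live in vmcb.h):

#include <stdint.h>

/* EXITINTINFO/EVENTINJ event description (APMv2). */
#define	EI_VECTOR(x)	((x) & 0xff)		/* bits 7:0 */
#define	EI_TYPE(x)	(((x) >> 8) & 0x7)	/* bits 10:8 */
#define	EI_EC_VALID(x)	(((x) >> 11) & 0x1)	/* bit 11 */
#define	EI_VALID(x)	(((x) >> 31) & 0x1)	/* bit 31 */
#define	EI_EC(x)	((uint32_t)((x) >> 32))	/* bits 63:32 */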
675/*
676 * Determine the cause of virtual cpu exit and handle VMEXIT.
677 * Return: false - Break vcpu execution loop and handle vmexit
678 * in kernel or user space.
679 * true - Continue vcpu run.
680 */
681static bool
682svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)

--- 17 unchanged lines hidden ---

700 update_rip = true;
701 loop = true;
702 vmexit->exitcode = VM_EXITCODE_VMX;
703 vmexit->u.vmx.status = 0;
704
705 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
706 "injection valid bit is set %#lx", __func__, ctrl->eventinj));
707
785 svm_save_intinfo(svm_sc, vcpu);
786
708 switch (code) {
709 case VMCB_EXIT_MC: /* Machine Check. */
710 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_MTRAP, 1);
711 vmexit->exitcode = VM_EXITCODE_MTRAP;
712 loop = false;
713 break;
714
715 case VMCB_EXIT_MSR: /* MSR access. */

--- 61 unchanged lines hidden ---

777 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:VMEXIT ExtInt"
778 " RIP:0x%lx.\n", state->rip);
779 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
780 break;
781
782 case VMCB_EXIT_IO:
783 loop = svm_handle_io(svm_sc, vcpu, vmexit);
784 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
785 update_rip = true;
786 break;
787
788 case VMCB_EXIT_CPUID:
789 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
790 (void)x86_emulate_cpuid(svm_sc->vm, vcpu,
791 (uint32_t *)&state->rax,
792 (uint32_t *)&ctx->sctx_rbx,
793 (uint32_t *)&ctx->sctx_rcx,

--- 48 unchanged lines hidden ---

842 vmexit->u.paging.fault_type =
843 svm_npf_paging(info1);
844 vmm_stat_incr(svm_sc->vm, vcpu,
845 VMEXIT_NESTED_FAULT, 1);
846 } else if (svm_npf_emul_fault(info1)) {
847 VCPU_CTR3(svm_sc->vm, vcpu, "SVM:NPF inst_emul,"
848 "RIP:0x%lx INFO1:0x%lx INFO2:0x%lx .\n",
849 state->rip, info1, info2);
850 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
851 vmexit->u.inst_emul.gpa = info2;
852 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
853 vmexit->u.inst_emul.paging.cr3 = state->cr3;
854 vmexit->u.inst_emul.paging.cpu_mode =
855 svm_vcpu_mode(state->efer);
856 vmexit->u.inst_emul.paging.paging_mode =
857 svm_paging_mode(state->cr0, state->cr4,
858 state->efer);
859 /* XXX: get CPL from SS */
860 vmexit->u.inst_emul.paging.cpl = 0;
861 /*
862 * If DecodeAssist SVM feature doesn't exist,
863 * we don't have faulty instuction length. New
864 * RIP will be calculated based on software
865 * instruction emulation.
866 */
867 vmexit->inst_length = VIE_INST_SIZE;
928 svm_handle_inst_emul(svm_get_vmcb(svm_sc, vcpu),
929 info2, vmexit);
868 vmm_stat_incr(svm_sc->vm, vcpu,
869 VMEXIT_INST_EMUL, 1);
870 }
871
872 break;
873
874 case VMCB_EXIT_SHUTDOWN:
875 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:VMEXIT shutdown.");

--- 63 unchanged lines hidden ---

939 /* Acknowledge that the request has been accepted. */
940 vm_nmi_clear(svm_sc->vm, vcpu);
941
942 VCPU_CTR0(svm_sc->vm, vcpu, "SVM:Injected NMI.\n");
943
944 return (1);
945}
946
1009static void
1010svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
1011{
1012 struct vmcb_ctrl *ctrl;
1013 uint64_t intinfo;
1014
1015 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
1016
1017 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
1018 return;
1019
1020 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
1021 "valid: %#lx", __func__, intinfo));
1022
1023 vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo),
1024 VMCB_EXITINTINFO_VECTOR(intinfo),
1025 VMCB_EXITINTINFO_EC(intinfo),
1026 VMCB_EXITINTINFO_EC_VALID(intinfo));
1027 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
1028 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
1029}
1030
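svm_save_intinfo() and svm_inj_intinfo() are the two halves of one round trip: an event interrupted by a #VMEXIT is parked in the vmm core via vm_exit_intinfo(), and the next guest entry fetches it back with vm_entry_intinfo() and rewrites it into the eventinj field. A sketch of the encode step that vmcb_eventinject() performs, assuming the event-description layout shown earlier (see vmcb.c for the real implementation):

#include <stdint.h>

/* Build an EVENTINJ value (illustrative sketch). */
static uint64_t
eventinj_encode(int type, int vector, uint32_t ec, int ec_valid)
{
	uint64_t v;

	v = (vector & 0xff) | ((uint64_t)(type & 0x7) << 8) |
	    (1UL << 31);			/* valid bit */
	if (ec_valid)
		v |= (1UL << 11) | ((uint64_t)ec << 32);
	return (v);
}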
947/*
948 * Inject an event into the virtual CPU.
949 */
950static void
951svm_inj_interrupts(struct svm_softc *svm_sc, int vcpu, struct vlapic *vlapic)
952{
953 struct vmcb_ctrl *ctrl;
954 struct vmcb_state *state;
955 struct vm_exception exc;
956 int extint_pending;
957 int vector;
958
959 KASSERT(vcpu < svm_sc->vcpu_cnt, ("Guest doesn't have VCPU%d", vcpu));
960
961 state = svm_get_vmcb_state(svm_sc, vcpu);
962 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
963
964 if (vm_exception_pending(svm_sc->vm, vcpu, &exc)) {
965 KASSERT(exc.vector >= 0 && exc.vector < 32,
966 ("Exception vector% invalid", exc.vector));
967 vmcb_eventinject(ctrl, VMCB_EVENTINJ_TYPE_EXCEPTION, exc.vector,
968 exc.error_code, exc.error_code_valid);
969 }
1047 svm_inj_intinfo(svm_sc, vcpu);
970
971 /* Can't inject multiple events at once. */
972 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
973 VCPU_CTR1(svm_sc->vm, vcpu,
974 "SVM:Last event(0x%lx) is pending.\n", ctrl->eventinj);
975 return ;
976 }
977

--- 61 unchanged lines hidden ---

1039 /*
1040 * Task selector that should be restored in host is
1041 * 64-bit available(9), not what is read(0xb), see
1042 * APMvol2 Rev3.21 4.8.3 System Descriptors table.
1043 */
1044 desc->sd_type = 9;
1045}
1046
1047static void
1048svm_handle_exitintinfo(struct svm_softc *svm_sc, int vcpu)
1049{
1050 struct vmcb_ctrl *ctrl;
1051 uint64_t intinfo;
1052
1053 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
1054
1055 /*
1056 * VMEXIT while delivering an exception or interrupt.
1057 * Inject it as virtual interrupt.
1058 * Section 15.7.2 Intercepts during IDT interrupt delivery.
1059 */
1060 intinfo = ctrl->exitintinfo;
1061
1062 if (VMCB_EXITINTINFO_VALID(intinfo)) {
1063 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
1064 VCPU_CTR1(svm_sc->vm, vcpu, "SVM:EXITINTINFO:0x%lx is valid\n",
1065 intinfo);
1066 vmcb_eventinject(ctrl, VMCB_EXITINTINFO_TYPE(intinfo),
1067 VMCB_EXITINTINFO_VECTOR(intinfo),
1068 VMCB_EXITINTINFO_EC(intinfo),
1069 VMCB_EXITINTINFO_EC_VALID(intinfo));
1070 }
1071}
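This deleted helper re-injected an interrupted event directly on the next entry. Its job is now split across the two new functions above: svm_save_intinfo() records the event at #VMEXIT time and hands it to the vmm core, and svm_inj_intinfo() replays whatever vm_entry_intinfo() returns at entry, letting the core reconcile the saved event with any exception raised while the exit was being emulated (see the encode sketch after svm_inj_intinfo).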
1072/*
1073 * Start vcpu with specified RIP.
1074 */
1075static int
1076svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1077 void *rend_cookie, void *suspended_cookie)
1078{
1079 struct svm_regctx *hctx, *gctx;

--- 21 unchanged lines hidden (view full) ---

1101 hctx = &host_ctx[curcpu];
1102 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1103
1104 if (vcpustate->lastcpu != curcpu) {
1105 /* Virtual CPU is running on a different CPU now. */
1106 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
1107
1108 /*
1109 * Flush all TLB mapping for this guest on this CPU,
1110 * it might have stale entries.
1162 * Flush all TLB mappings for this guest on this CPU,
1163 * it might have stale entries since vcpu has migrated
1164 * or vmm is restarted.
1111 */
1112 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
1113
1114 /* Can't use any VMCB state cached by the CPU. */
1115 ctrl->vmcb_clean = VMCB_CACHE_NONE;
1116 } else {
1117 /*
1118 * XXX: Using same ASID for all vcpus of a VM will cause TLB

--- 61 unchanged lines hidden ---

1180 VCPU_CTR1(vm, vcpu,
1181 "SVM: ASTPENDING, RIP:0x%lx\n", state->rip);
1182 vmexit->rip = state->rip;
1183 break;
1184 }
1185
1186 (void)svm_set_vmcb(svm_get_vmcb(svm_sc, vcpu), svm_sc->asid);
1187
1188 svm_handle_exitintinfo(svm_sc, vcpu);
1189
1190 svm_inj_interrupts(svm_sc, vcpu, vlapic);
1191
1192 /* Change TSS type to available.*/
1193 setup_tss_type();
1194
1195 /* Launch Virtual Machine. */
1196 svm_launch(vmcb_pa, gctx, hctx);
1197

--- 373 unchanged lines hidden ---