/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276349 2014-12-28 21:27:13Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/intel/vmx.c 276349 2014-12-28 21:27:13Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

--- 239 unchanged lines hidden ---

	case EXIT_REASON_MWAIT:
		return "mwait";
	case EXIT_REASON_MTF:
		return "mtf";
	case EXIT_REASON_MONITOR:
		return "monitor";
	case EXIT_REASON_PAUSE:
		return "pause";
	case EXIT_REASON_MCE:
		return "mce";
	case EXIT_REASON_TPR:
		return "tpr";
	case EXIT_REASON_APIC_ACCESS:
		return "apic-access";
	case EXIT_REASON_GDTR_IDTR:
		return "gdtridtr";
	case EXIT_REASON_LDTR_TR:
		return "ldtrtr";

--- 520 unchanged lines hidden ---


static void *
vmx_vminit(struct vm *vm, pmap_t pmap)
{
	uint16_t vpid[VM_MAXCPU];
	int i, error;
	struct vmx *vmx;
	struct vmcs *vmcs;

	vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
	if ((uintptr_t)vmx & PAGE_MASK) {
		panic("malloc of struct vmx not aligned on %d byte boundary",
		    PAGE_SIZE);
	}
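	/*
	 * The check above relies on malloc(9) returning page-aligned memory
	 * for allocations of at least a page.  struct vmx presumably embeds
	 * per-vcpu structures (VMCS, APIC page, MSR bitmap) that the
	 * hardware requires to be 4KB-aligned, so an unaligned allocation
	 * is treated as fatal rather than worked around.
	 */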
	vmx->vm = vm;


--- 74 unchanged lines hidden ---

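		/*
		 * Each vmwrite() below returns 0 on success and non-zero on
		 * failure, so failures accumulate in 'error'; the total is
		 * presumably checked once after the whole block rather than
		 * after every individual write.
		 */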
		error += vmwrite(VMCS_EPTP, vmx->eptp);
		error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
		error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
		error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
		error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
		error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
		error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
		error += vmwrite(VMCS_VPID, vpid[i]);
		if (virtual_interrupt_delivery) {
			error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
			error += vmwrite(VMCS_VIRTUAL_APIC,
			    vtophys(&vmx->apic_page[i]));
			error += vmwrite(VMCS_EOI_EXIT0, 0);
			error += vmwrite(VMCS_EOI_EXIT1, 0);
			error += vmwrite(VMCS_EOI_EXIT2, 0);
			error += vmwrite(VMCS_EOI_EXIT3, 0);

--- 819 unchanged lines hidden ---

		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		s = (inst_info >> 15) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}
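	/*
	 * For INS the segment is architecturally %es, which is why one
	 * branch above hardcodes VM_REG_GUEST_ES; for OUTS, bits 17:15 of
	 * the VM-exit instruction-information field encode the segment
	 * register (Intel SDM), hence the shift by 15 and the 3-bit mask.
	 */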

	error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));

	/* XXX modify svm.c to update bit 16 of seg_desc.access (unusable) */
}

static void
vmx_paging_info(struct vm_guest_paging *paging)
{
	paging->cr3 = vmcs_guest_cr3();
	paging->cpl = vmx_cpl();
	paging->cpu_mode = vmx_cpu_mode();

--- 17 unchanged lines hidden ---

	case CPU_MODE_COMPATIBILITY:
		csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
		vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}
}

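/*
 * Map an EPT-violation exit qualification to a fault type.  The low bits
 * of the qualification encode the access that caused the violation (data
 * read, data write, instruction fetch) per the Intel SDM.
 */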
static int
ept_fault_type(uint64_t ept_qual)
{
	int fault_type;

	if (ept_qual & EPT_VIOLATION_DATA_WRITE)

--- 260 unchanged lines hidden ---

static int
vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
{
	int error, handled, in;
	struct vmxctx *vmxctx;
	struct vlapic *vlapic;
	struct vm_inout_str *vis;
	struct vm_task_switch *ts;
	uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
	uint32_t intr_type, reason;
	uint64_t exitintinfo, qual, gpa;
	bool retu;

	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
	CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
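	/*
	 * The compile-time assertions above guarantee that "NMI exiting"
	 * and "virtual NMIs" are part of the pin-based one-settings and are
	 * therefore always enabled; the NMI-blocking handling further down
	 * relies on both.
	 */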

	handled = UNHANDLED;
	vmxctx = &vmx->ctx[vcpu];

	qual = vmexit->u.vmx.exit_qualification;
	reason = vmexit->u.vmx.exit_reason;
	vmexit->exitcode = VM_EXITCODE_BOGUS;

	vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * VM exits that can be triggered during event delivery need to
	 * be handled specially by re-injecting the event if the IDT
	 * vectoring information field's valid bit is set.
	 *
	 * See "Information for VM Exits During Event Delivery" in Intel SDM
	 * for details.
	 */
	idtvec_info = vmcs_idt_vectoring_info();
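	/*
	 * A minimal sketch of that recipe, assuming the usual intinfo
	 * layout (vector and type in the low word, error code folded into
	 * the high 32 bits) and vm_exit_intinfo() as the consumer; the
	 * actual code follows in lines not shown here and may differ:
	 *
	 *	if (idtvec_info & VMCS_IDT_VEC_VALID) {
	 *		exitintinfo = idtvec_info;
	 *		if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID)
	 *			exitintinfo |=
	 *			    (uint64_t)vmcs_idt_vectoring_err() << 32;
	 *		vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
	 *	}
	 */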

--- 215 unchanged lines hidden ---

		handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
		break;
	case EXIT_REASON_EXCEPTION:
		vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
		intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
		KASSERT((intr_info & VMCS_INTR_VALID) != 0,
		    ("VM exit interruption info invalid: %#x", intr_info));

		/*
		 * If Virtual NMIs control is 1 and the VM-exit is due to a
		 * fault encountered during the execution of IRET then we must
		 * restore the state of "virtual-NMI blocking" before resuming
		 * the guest.
		 *
		 * See "Resuming Guest Software after Handling an Exception".
		 * See "Information for VM Exits Due to Vectored Events".
		 */
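		/*
		 * EXIT_QUAL_NMIUDTI presumably corresponds to bit 12 of the
		 * exit interruption information ("NMI unblocking due to
		 * IRET" in the SDM); double faults are excluded below, as
		 * the SDM recipe specifies.
		 */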
		if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
		    (intr_info & 0xff) != IDT_DF &&
		    (intr_info & EXIT_QUAL_NMIUDTI) != 0)
			vmx_restore_nmi_blocking(vmx, vcpu);

		/*
		 * The NMI has already been handled in vmx_exit_handle_nmi().
		 */
		if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
			return (1);
		break;
	case EXIT_REASON_EPT_FAULT:
		/*
		 * If 'gpa' lies within the address space allocated to
		 * memory then this must be a nested page fault otherwise
		 * this must be an instruction that accesses MMIO space.
		 */
		gpa = vmcs_gpa();
		if (vm_mem_allocated(vmx->vm, gpa) ||

--- 1000 unchanged lines hidden ---