--- stable/10/sys/amd64/vmm/amd/svm.c	(revision 284900)
+++ stable/10/sys/amd64/vmm/amd/svm.c	(revision 285015)
 /*-
  * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
[... 11 unchanged lines hidden ...]
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm.c 284900 2015-06-28 03:22:26Z neel $");
+__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/amd/svm.c 285015 2015-07-01 19:46:57Z neel $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/smp.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
[... 1874 unchanged lines hidden ...]
 	struct svm_softc *svm_sc;
 	struct svm_vcpu *vcpustate;
 	struct vmcb_state *state;
 	struct vmcb_ctrl *ctrl;
 	struct vm_exit *vmexit;
 	struct vlapic *vlapic;
 	struct vm *vm;
 	uint64_t vmcb_pa;
-	u_int thiscpu;
 	int handled;
 
 	svm_sc = arg;
 	vm = svm_sc->vm;
 
 	vcpustate = svm_get_vcpu(svm_sc, vcpu);
 	state = svm_get_vmcb_state(svm_sc, vcpu);
 	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
 	vmexit = vm_exitinfo(vm, vcpu);
 	vlapic = vm_lapic(vm, vcpu);
 
-	/*
-	 * Stash 'curcpu' on the stack as 'thiscpu'.
-	 *
-	 * The per-cpu data area is not accessible until MSR_GSBASE is restored
-	 * after the #VMEXIT. Since VMRUN is executed inside a critical section
-	 * 'curcpu' and 'thiscpu' are guaranteed to be identical.
-	 */
-	thiscpu = curcpu;
-
 	gctx = svm_get_guest_regctx(svm_sc, vcpu);
 	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
 
-	if (vcpustate->lastcpu != thiscpu) {
+	if (vcpustate->lastcpu != curcpu) {
 		/*
 		 * Force new ASID allocation by invalidating the generation.
 		 */
 		vcpustate->asid.gen = 0;
 
 		/*
 		 * Invalidate the VMCB state cache by marking all fields dirty.
 		 */
[... 4 unchanged lines hidden ...]
 		 * Setting 'vcpustate->lastcpu' here is bit premature because
 		 * we may return from this function without actually executing
 		 * the VMRUN instruction. This could happen if a rendezvous
 		 * or an AST is pending on the first time through the loop.
 		 *
 		 * This works for now but any new side-effects of vcpu
 		 * migration should take this case into account.
 		 */
-		vcpustate->lastcpu = thiscpu;
+		vcpustate->lastcpu = curcpu;
 		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
 	}
 
 	svm_msr_guest_enter(svm_sc, vcpu);
 
 	/* Update Guest RIP */
 	state->rip = rip;
 
[... 29 unchanged lines hidden ...]
 		if (vcpu_should_yield(vm, vcpu)) {
 			enable_gintr();
 			vm_exit_astpending(vm, vcpu, state->rip);
 			break;
 		}
 
 		svm_inj_interrupts(svm_sc, vcpu, vlapic);
 
-		/* Activate the nested pmap on 'thiscpu' */
-		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);
+		/* Activate the nested pmap on 'curcpu' */
+		CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);
 
 		/*
 		 * Check the pmap generation and the ASID generation to
 		 * ensure that the vcpu does not use stale TLB mappings.
 		 */
-		check_asid(svm_sc, vcpu, pmap, thiscpu);
+		check_asid(svm_sc, vcpu, pmap, curcpu);
 
 		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
 		vcpustate->dirty = 0;
 		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
 
 		/* Launch Virtual Machine. */
 		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
-		svm_launch(vmcb_pa, gctx);
+		svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
 
-		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
+		CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
 
 		/*
-		 * Restore MSR_GSBASE to point to the pcpu data area.
-		 *
-		 * Note that accesses done via PCPU_GET/PCPU_SET will work
-		 * only after MSR_GSBASE is restored.
-		 *
-		 * Also note that we don't bother restoring MSR_KGSBASE
-		 * since it is not used in the kernel and will be restored
-		 * when the VMRUN ioctl returns to userspace.
-		 */
-		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
-		KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
-		    thiscpu, curcpu));
-
-		/*
 		 * The host GDTR and IDTR is saved by VMRUN and restored
 		 * automatically on #VMEXIT. However, the host TSS needs
 		 * to be restored explicitly.
 		 */
 		restore_host_tss();
 
 		/* #VMEXIT disables interrupts so re-enable them here. */
 		enable_gintr();
[... 219 unchanged lines hidden ...]
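
The substance of this revision: svm_launch() now takes the host's pcpu pointer as a third argument (&__pcpu[curcpu]) and restores MSR_GSBASE itself on the #VMEXIT path, before returning to C code. Because 'curcpu' resolves through the per-cpu area addressed via %gs, it is only usable while MSR_GSBASE points at the host pcpu; under the old scheme the caller therefore had to stash 'curcpu' in 'thiscpu' before VMRUN and issue an explicit wrmsr(MSR_GSBASE, ...) afterwards. With the restore moved into the launch stub, svm_vmrun() can use 'curcpu' directly throughout, and the 'thiscpu' local, the wrmsr() call, and the KASSERT all go away. The userland sketch below illustrates the new contract; mock_pcpu, mock_gsbase, and mock_svm_launch are illustrative stand-ins (not the real vmm code), and the guest register context argument is omitted for brevity:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct pcpu {
	unsigned pc_cpuid;
};

static struct pcpu mock_pcpu[4];	/* stand-in for the kernel's __pcpu[] */
static uintptr_t mock_gsbase;		/* stand-in for MSR_GSBASE */

/*
 * 'curcpu' reads the per-cpu area through GSBASE, so it is meaningful
 * only while GSBASE points at a host pcpu structure.
 */
static unsigned
mock_curcpu(void)
{
	assert(mock_gsbase != 0);
	return (((struct pcpu *)mock_gsbase)->pc_cpuid);
}

/*
 * Post-r285015 contract: the caller hands over its pcpu pointer and the
 * launch stub restores GSBASE before returning, so 'curcpu' works on
 * the very next line of C code after the call.
 */
static void
mock_svm_launch(uint64_t vmcb_pa, struct pcpu *hostcpu)
{
	(void)vmcb_pa;
	mock_gsbase = 0;			/* VMRUN: guest owns GSBASE */
	/* ... guest runs until #VMEXIT ... */
	mock_gsbase = (uintptr_t)hostcpu;	/* restored inside the stub */
}

int
main(void)
{
	unsigned cpu = 2;			/* pretend we are pinned here */

	mock_pcpu[cpu].pc_cpuid = cpu;
	mock_gsbase = (uintptr_t)&mock_pcpu[cpu];

	mock_svm_launch(0x1000, &mock_pcpu[mock_curcpu()]);

	/* No 'thiscpu' copy and no explicit wrmsr needed in the caller. */
	assert(mock_curcpu() == cpu);
	printf("curcpu usable right after launch: %u\n", mock_curcpu());
	return (0);
}

Restoring GSBASE as early as possible, inside the stub, also shrinks the window in which host C code could run with a guest-loaded GSBASE.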