Deleted Added
sdiff udiff text old ( 264619 ) new ( 266339 )
full compact
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:

--- 10 unchanged lines hidden (view full) ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_support.S 264619 2014-04-17 18:00:07Z jhb $
 */
29
30#include <machine/asmacros.h>
31
32#include "vmx_assym.s"
33
34#ifdef SMP
35#define LK lock ;

--- 56 unchanged lines hidden (view full) ---

92 movq VMXCTX_HOST_RSP(%rdi), %rsp; \
93 movq VMXCTX_HOST_RBX(%rdi), %rbx; \
94 movq VMXCTX_HOST_RIP(%rdi), tmpreg; \
95 movq tmpreg, (%rsp) /* return address */
96
/*
 * vmx_enter_guest(struct vmxctx *vmxctx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %esi: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
103ENTRY(vmx_enter_guest)
104 /*
105 * Save host state before doing anything else.
106 */
107 VMX_HOST_SAVE(%r10)
108
109 /*
110 * Activate guest pmap on this cpu.
111 */
112 movq VMXCTX_PMAP(%rdi), %r11
113 movl PCPU(CPUID), %eax
114 LK btsl %eax, PM_ACTIVE(%r11)
115
116 /*
117 * If 'vmxctx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
118 * then we must invalidate all mappings associated with this EPTP.
119 */
120 movq PM_EPTGEN(%r11), %r10
121 cmpq %r10, VMXCTX_EPTGEN(%rdi, %rax, 8)
122 je guest_restore
123
124 /* Refresh 'vmxctx->eptgen[curcpu]' */
125 movq %r10, VMXCTX_EPTGEN(%rdi, %rax, 8)
126
127 /* Setup the invept descriptor on the host stack */
128 mov %rsp, %r11
129 movq VMXCTX_EPTP(%rdi), %rax
130 movq %rax, -16(%r11)
131 movq $0x0, -8(%r11)
132 mov $0x1, %eax /* Single context invalidate */
133 invept -16(%r11), %rax
134 jbe invept_error /* Check invept instruction error */
135
136guest_restore:
137 cmpl $0, %esi
138 je do_launch
139
140 VMX_GUEST_RESTORE
141 vmresume
142 /*
143 * In the common case 'vmresume' returns back to the host through
144 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
145 *

--- 83 unchanged lines hidden (view full) ---

229
230 /*
231 * This will return to the caller of 'vmx_enter_guest()' with a return
232 * value of VMX_GUEST_VMEXIT.
233 */
234 movl $VMX_GUEST_VMEXIT, %eax
235 ret
236END(vmx_exit_guest)