/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/amd64/vmm/intel/vmx_support.S 338427 2018-09-02 10:51:31Z kib $
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "vmx_assym.h"

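/*
 * "LK" expands to a lock prefix on SMP kernels so that the guest pmap's
 * pm_active bit operations below are atomic across CPUs.
 */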
#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Save the guest context. On VM-exit %rsp points to the 'vmxctx' (hardware
 * loads it from the host-state area of the VMCS), so the guest registers are
 * saved relative to %rsp and %rdi is left pointing at the 'vmxctx' for the
 * code that follows.
 */
#define	VMX_GUEST_SAVE							\
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp);				\
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp);				\
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp);				\
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp);				\
	movq	%r8,VMXCTX_GUEST_R8(%rsp);				\
	movq	%r9,VMXCTX_GUEST_R9(%rsp);				\
	movq	%rax,VMXCTX_GUEST_RAX(%rsp);				\
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp);				\
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp);				\
	movq	%r10,VMXCTX_GUEST_R10(%rsp);				\
	movq	%r11,VMXCTX_GUEST_R11(%rsp);				\
	movq	%r12,VMXCTX_GUEST_R12(%rsp);				\
	movq	%r13,VMXCTX_GUEST_R13(%rsp);				\
	movq	%r14,VMXCTX_GUEST_R14(%rsp);				\
	movq	%r15,VMXCTX_GUEST_R15(%rsp);				\
	movq	%cr2,%rdi;						\
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp);				\
	movq	%rsp,%rdi;

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */

/*
 * Zero the caller-saved registers that still hold guest contents so that
 * stale guest values cannot be misused. The callee-saved registers are
 * reloaded with host values by VMX_HOST_RESTORE and %rdi already points
 * to the 'vmxctx'.
 */
#define	VMX_GUEST_CLOBBER						\
	xor	%rax, %rax;						\
	xor	%rcx, %rcx;						\
	xor	%rdx, %rdx;						\
	xor	%rsi, %rsi;						\
	xor	%r8, %r8;						\
	xor	%r9, %r9;						\
	xor	%r10, %r10;						\
	xor	%r11, %r11;

/*
 * Save and restore the host context.
 *
 * Only the callee-saved registers of the C calling convention need to be
 * preserved here; the caller of vmx_enter_guest() does not expect the
 * caller-saved registers to survive the call.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
#define	VMX_HOST_SAVE							\
	movq    %r15, VMXCTX_HOST_R15(%rdi);				\
	movq    %r14, VMXCTX_HOST_R14(%rdi);				\
	movq    %r13, VMXCTX_HOST_R13(%rdi);				\
	movq    %r12, VMXCTX_HOST_R12(%rdi);				\
	movq    %rbp, VMXCTX_HOST_RBP(%rdi);				\
	movq    %rsp, VMXCTX_HOST_RSP(%rdi);				\
	movq    %rbx, VMXCTX_HOST_RBX(%rdi);

#define	VMX_HOST_RESTORE						\
	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

	/*
	 * Activate guest pmap on this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %eax
	LK btsl	%eax, PM_ACTIVE(%r11)

	/*
	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
	 * then we must invalidate all mappings associated with this EPTP.
	 */
	movq	PM_EPTGEN(%r11), %r10
	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
	je	guest_restore

	/* Refresh 'vmx->eptgen[curcpu]' */
	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)

	/*
	 * Set up the 128-bit invept descriptor on the host stack: the EPTP
	 * in the low quadword and zero (reserved) in the high quadword.
	 */
	mov	%rsp, %r11
	movq	VMX_EPTP(%rsi), %rax
	movq	%rax, -16(%r11)
	movq	$0x0, -8(%r11)
	mov	$0x1, %eax		/* Single context invalidate */
	invept	-16(%r11), %rax
	jbe	invept_error		/* CF or ZF set if the invept failed */

guest_restore:
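	/*
	 * Preserve the launch state in %r8d so that it survives the optional
	 * call to flush_l1d_sw() below, which performs the software L1D cache
	 * flush used as an L1TF mitigation before entering the guest.
	 */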
	movl	%edx, %r8d
	cmpb	$0, guest_l1d_flush_sw(%rip)
	je	after_l1d
	call	flush_l1d_sw
after_l1d:
	cmpl	$0, %r8d
	je	do_launch
	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns back to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

invept_error:
	movl	$VMX_INVEPT_ERROR, %eax
	jmp	decode_inst_error

decode_inst_error:
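	/*
	 * The failed VMX instruction left ZF set for VMfailValid (a current
	 * VMCS exists and holds the error number) or CF set for VMfailInvalid;
	 * nothing on the way here has modified the flags since then.
	 */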
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE
	VLEAVE
	ret

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest_flush_rsb
vmx_exit_guest_flush_rsb:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * To prevent malicious branch target predictions from
	 * affecting the host, overwrite all entries in the RSB upon
	 * exiting a guest.
	 */
	mov	$16, %ecx	/* 16 iterations, two calls per loop */
	mov	%rsp, %rax
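	/*
	 * The calls below leave stale return addresses on the stack; they are
	 * abandoned when %rsp is restored from %rax after the loop.
	 */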
0:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	call	2f		/* create an RSB entry. */
1:	pause
	call	1b		/* capture rogue speculation. */
2:	sub	$1, %ecx
	jnz	0b
	mov	%rax, %rsp

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret

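	/*
	 * Identical to vmx_exit_guest_flush_rsb except that the RSB is not
	 * overwritten on the way out; C code selects which of the two labels
	 * to install as the VMCS host %rip.
	 */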
	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	VMX_GUEST_SAVE

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	VMX_GUEST_CLOBBER

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * Calling sequence described in the "Instruction Set Reference" for the "INT"
 * instruction in Intel SDM, Vol 2.
 */
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11			/* save %rsp */
	and	$~0xf, %rsp			/* align on 16-byte boundary */
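	/*
	 * Build the stack frame that the interrupt handler's iret expects:
	 * %ss, %rsp, %rflags, %cs and, via the call below, %rip.
	 */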
	pushq	$KERNEL_SS			/* %ss */
	pushq	%r11				/* %rsp */
	pushfq					/* %rflags */
	pushq	$KERNEL_CS			/* %cs */
	cli					/* disable interrupts */
	callq	*%rdi				/* push %rip and call isr */
	VLEAVE
	ret
END(vmx_call_isr)