/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2013 Neel Natu <neel@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/intel/vmx_support.S 268953 2014-07-21 19:08:02Z jhb $
 */

#include <machine/asmacros.h>

#include "vmx_assym.s"
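
/*
 * "vmx_assym.s" is generated at build time (via the genassym mechanism) and
 * supplies the VMXCTX_*, VMX_* and PM_* structure-field offsets used below,
 * so this file does not hard-code any structure layouts.
 */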

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif
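
/*
 * LK expands to the 'lock' prefix on SMP kernels so that the btsl/btrl
 * updates of pm_active below are atomic with respect to other cpus; on UP
 * kernels the prefix is unnecessary and omitted.
 */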

/* Be friendly to DTrace FBT's prologue/epilogue pattern matching */
#define VENTER  push %rbp ; mov %rsp,%rbp
#define VLEAVE  pop %rbp

/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore %rdi last */
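
/*
 * Note that %cr2 has no slot in the VMCS guest-state area, so it is loaded
 * by hand above (using %rsi as scratch before %rsi's own guest value is
 * restored) and saved by hand again in 'vmx_exit_guest' below.
 */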

/*
 * Save and restore the host context.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 */
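
/*
 * Only %rsp and the callee-saved registers of the amd64 ABI (%rbx, %rbp,
 * %r12-%r15) need to be preserved here; vmx_enter_guest() is reached by an
 * ordinary C call, so its caller already assumes the remaining registers
 * are clobbered.
 */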
#define	VMX_HOST_SAVE							\
	movq    %r15, VMXCTX_HOST_R15(%rdi);				\
	movq    %r14, VMXCTX_HOST_R14(%rdi);				\
	movq    %r13, VMXCTX_HOST_R13(%rdi);				\
	movq    %r12, VMXCTX_HOST_R12(%rdi);				\
	movq    %rbp, VMXCTX_HOST_RBP(%rdi);				\
	movq    %rsp, VMXCTX_HOST_RSP(%rdi);				\
	movq    %rbx, VMXCTX_HOST_RBX(%rdi);

#define	VMX_HOST_RESTORE						\
	movq	VMXCTX_HOST_R15(%rdi), %r15;				\
	movq	VMXCTX_HOST_R14(%rdi), %r14;				\
	movq	VMXCTX_HOST_R13(%rdi), %r13;				\
	movq	VMXCTX_HOST_R12(%rdi), %r12;				\
	movq	VMXCTX_HOST_RBP(%rdi), %rbp;				\
	movq	VMXCTX_HOST_RSP(%rdi), %rsp;				\
	movq	VMXCTX_HOST_RBX(%rdi), %rbx;

/*
 * vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx, int launched)
 * %rdi: pointer to the 'vmxctx'
 * %rsi: pointer to the 'vmx'
 * %edx: launch state of the VMCS
 * Interrupts must be disabled on entry.
 */
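/*
 * Seen from C, the call looks roughly like the sketch below (the
 * authoritative declaration lives with the callers in vmx.c):
 *
 *	int vmx_enter_guest(struct vmxctx *vmxctx, struct vmx *vmx,
 *	    int launched);
 *
 * The return value is VMX_GUEST_VMEXIT after a normal VM-exit, or
 * VMX_VMLAUNCH_ERROR / VMX_VMRESUME_ERROR / VMX_INVEPT_ERROR when the
 * corresponding instruction fails before the guest is entered.
 */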
ENTRY(vmx_enter_guest)
	VENTER
	/*
	 * Save host state before doing anything else.
	 */
	VMX_HOST_SAVE

	/*
	 * Activate guest pmap on this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %eax
	LK btsl	%eax, PM_ACTIVE(%r11)
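
	/*
	 * Broadly speaking, the bit set in pm_active above advertises that
	 * this cpu is about to run on the guest's pmap, so the pmap layer
	 * knows which cpus must observe EPT invalidations (compare the
	 * eptgen check below).
	 */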

	/*
	 * If 'vmx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
	 * then we must invalidate all mappings associated with this EPTP.
	 */
	movq	PM_EPTGEN(%r11), %r10
	cmpq	%r10, VMX_EPTGEN(%rsi, %rax, 8)
	je	guest_restore

	/* Refresh 'vmx->eptgen[curcpu]' */
	movq	%r10, VMX_EPTGEN(%rsi, %rax, 8)

	/* Set up the invept descriptor on the host stack */
	mov	%rsp, %r11
	movq	VMX_EPTP(%rsi), %rax
	movq	%rax, -16(%r11)
	movq	$0x0, -8(%r11)
	mov	$0x1, %eax		/* Single context invalidate */
	invept	-16(%r11), %rax
	jbe	invept_error		/* Check invept instruction error */
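
	/*
	 * For reference, the 16-byte descriptor built above follows the
	 * SDM layout for INVEPT; as an illustrative C sketch:
	 *
	 *	struct invept_desc {
	 *		uint64_t eptp;		<- stored at -16(%r11)
	 *		uint64_t reserved;	<- -8(%r11), must be zero
	 *	};
	 *
	 * Type 1 in %eax requests a single-context invalidation of that
	 * EPTP; 'jbe' catches both CF (VMfailInvalid) and ZF (VMfailValid).
	 */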

guest_restore:
	cmpl	$0, %edx
	je	do_launch

	VMX_GUEST_RESTORE
	vmresume
	/*
	 * In the common case 'vmresume' returns to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error, we return VMX_VMRESUME_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMRESUME_ERROR, %eax
	jmp	decode_inst_error

do_launch:
	VMX_GUEST_RESTORE
	vmlaunch
	/*
	 * In the common case 'vmlaunch' returns to the host through
	 * 'vmx_exit_guest' with %rsp pointing to 'vmxctx'.
	 *
	 * If there is an error, we return VMX_VMLAUNCH_ERROR to the caller.
	 */
	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */
	movl	$VMX_VMLAUNCH_ERROR, %eax
	jmp	decode_inst_error

invept_error:
	movl	$VMX_INVEPT_ERROR, %eax
	jmp	decode_inst_error

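	/*
	 * Per the SDM, a failing VMX instruction reports VMfailValid by
	 * setting ZF (with an error number in the VM-instruction error
	 * field) and VMfailInvalid by setting CF.  Nothing between the
	 * failing instruction and this point modifies the flags, so they
	 * can still be examined here.
	 */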
decode_inst_error:
	movl	$VM_FAIL_VALID, %r11d
	jz	inst_error
	movl	$VM_FAIL_INVALID, %r11d
inst_error:
	movl	%r11d, VMXCTX_INST_FAIL_STATUS(%rdi)

	/*
	 * The return value is already populated in %eax so we cannot use
	 * it as a scratch register beyond this point.
	 */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE
	VLEAVE
	ret

/*
 * Non-error VM-exit from the guest. Make this a label so it can
 * be used by C code when setting up the VMCS.
 * The VMCS-restored %rsp points to the struct vmxctx.
 */
	ALIGN_TEXT
	.globl	vmx_exit_guest
vmx_exit_guest:
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp, %rdi		/* point %rdi back to 'vmxctx' */

	/*
	 * Deactivate guest pmap from this cpu.
	 */
	movq	VMXCTX_PMAP(%rdi), %r11
	movl	PCPU(CPUID), %r10d
	LK btrl	%r10d, PM_ACTIVE(%r11)

	VMX_HOST_RESTORE

	/*
	 * This will return to the caller of 'vmx_enter_guest()' with a return
	 * value of VMX_GUEST_VMEXIT.
	 */
	movl	$VMX_GUEST_VMEXIT, %eax
	VLEAVE
	ret
END(vmx_enter_guest)

/*
 * %rdi = interrupt handler entry point
 *
 * The calling sequence is described in the "Instruction Set Reference" for
 * the "INT" instruction in the Intel SDM, Vol 2.
 */
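/*
 * As a sketch, the frame synthesized on the (16-byte aligned) stack below
 * mirrors what an interrupt delivered through the IDT would push, so the
 * handler's iretq returns to the instruction following the 'callq':
 *
 *	%ss      <- pushq $KERNEL_SS
 *	%rsp     <- pushq %r11 (the pre-alignment stack pointer)
 *	%rflags  <- pushfq (captured before interrupts are disabled)
 *	%cs      <- pushq $KERNEL_CS
 *	%rip     <- pushed by 'callq *%rdi' itself
 */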
ENTRY(vmx_call_isr)
	VENTER
	mov	%rsp, %r11			/* save %rsp */
	and	$~0xf, %rsp			/* align on 16-byte boundary */
	pushq	$KERNEL_SS			/* %ss */
	pushq	%r11				/* %rsp */
	pushfq					/* %rflags */
	pushq	$KERNEL_CS			/* %cs */
	cli					/* disable interrupts */
	callq	*%rdi				/* push %rip and call isr */
	VLEAVE
	ret
END(vmx_call_isr)