/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/intel/vmx.h 256645 2013-10-16 18:20:27Z neel $
 */

#ifndef _VMX_H_
#define	_VMX_H_

#include "vmcs.h"

struct pmap;

#define	GUEST_MSR_MAX_ENTRIES	64		/* arbitrary */

struct vmxctx {
	register_t	tmpstk[32];		/* vmx_return() stack */
	register_t	tmpstktop;

	register_t	guest_rdi;		/* Guest state */
	register_t	guest_rsi;
	register_t	guest_rdx;
	register_t	guest_rcx;
	register_t	guest_r8;
	register_t	guest_r9;
	register_t	guest_rax;
	register_t	guest_rbx;
	register_t	guest_rbp;
	register_t	guest_r10;
	register_t	guest_r11;
	register_t	guest_r12;
	register_t	guest_r13;
	register_t	guest_r14;
	register_t	guest_r15;
	register_t	guest_cr2;

	register_t	host_r15;		/* Host state */
	register_t	host_r14;
	register_t	host_r13;
	register_t	host_r12;
	register_t	host_rbp;
	register_t	host_rsp;
	register_t	host_rbx;
	register_t	host_rip;
	/*
	 * XXX todo debug registers and fpu state
	 */

	int		launched;		/* vmcs launch state */
	int		launch_error;

	long		eptgen[MAXCPU];		/* cached pmap->pm_eptgen */

	/*
	 * The 'eptp' and the 'pmap' do not change during the lifetime of
	 * the VM so it is safe to keep a copy in each vcpu's vmxctx.
	 */
	vm_paddr_t	eptp;
	struct pmap	*pmap;
};
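/*
 * Note on 'eptgen' (a sketch of the intended use, not necessarily the exact
 * in-tree code): before entering the guest, the per-host-cpu generation
 * cached above is compared against the pmap's current 'pm_eptgen'; if it
 * lags, mappings tagged with this EPTP may be stale on this cpu and an
 * INVEPT is issued first, roughly:
 *
 *	if (vmxctx->eptgen[curcpu] != pmap->pm_eptgen) {
 *		invept(...);	(flush mappings tagged with this EPTP)
 *		vmxctx->eptgen[curcpu] = pmap->pm_eptgen;
 *	}
 *
 * A failure of that INVEPT is what VMX_RETURN_INVEPT (below) reports.
 */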

struct vmxcap {
	int	set;
	uint32_t proc_ctls;
	uint32_t proc_ctls2;
};

struct vmxstate {
	int	lastcpu;	/* host cpu that this 'vcpu' last ran on */
	uint16_t vpid;
};

/* virtual machine softc */
struct vmx {
	struct vmcs	vmcs[VM_MAXCPU];	/* one vmcs per virtual cpu */
	char		msr_bitmap[PAGE_SIZE];
	struct msr_entry guest_msrs[VM_MAXCPU][GUEST_MSR_MAX_ENTRIES];
	struct vmxctx	ctx[VM_MAXCPU];
	struct vmxcap	cap[VM_MAXCPU];
	struct vmxstate	state[VM_MAXCPU];
	uint64_t	eptp;
	struct vm	*vm;
};
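/*
 * Layout requirements documented by the assertions below (assuming the
 * 'struct vmx' itself is allocated page aligned, as the softc allocation is
 * expected to guarantee): the Intel SDM requires the VMCS region and the
 * MSR bitmap to be 4KB aligned, and the MSR load/store areas ('guest_msrs')
 * to be 16-byte aligned.
 */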
CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);

#define	VMX_RETURN_DIRECT	0
#define	VMX_RETURN_LONGJMP	1
#define	VMX_RETURN_VMRESUME	2
#define	VMX_RETURN_VMLAUNCH	3
#define	VMX_RETURN_AST		4
#define	VMX_RETURN_INVEPT	5
/*
 * vmx_setjmp() returns:
 * - 0 when it returns directly
 * - 1 when it returns from vmx_longjmp
 * - 2 when it returns from vmx_resume (which would only be in the error case)
 * - 3 when it returns from vmx_launch (which would only be in the error case)
 * - 4 when it returns from vmx_resume or vmx_launch because of AST pending
 * - 5 when it returns from vmx_launch/vmx_resume because of invept error
 */
int	vmx_setjmp(struct vmxctx *ctx);
void	vmx_longjmp(void);			/* returns via vmx_setjmp */
void	vmx_launch(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
void	vmx_resume(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
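/*
 * Illustrative use of the setjmp/longjmp-style guest entry protocol above.
 * This is a sketch only; the dispatch and exit handling shown here are
 * hypothetical and not the in-tree vmx.c code:
 *
 *	rc = vmx_setjmp(vmxctx);
 *	switch (rc) {
 *	case VMX_RETURN_DIRECT:
 *		(first pass: enter the guest; on success these do not return
 *		 here but come back through vmx_setjmp() with another code)
 *		if (vmxctx->launched)
 *			vmx_resume(vmxctx);
 *		else
 *			vmx_launch(vmxctx);
 *		break;
 *	case VMX_RETURN_LONGJMP:
 *		(back from the guest via vmx_longjmp(): process the VM exit)
 *		break;
 *	case VMX_RETURN_AST:
 *		(entry was aborted because an AST was pending)
 *		break;
 *	case VMX_RETURN_VMLAUNCH:
 *	case VMX_RETURN_VMRESUME:
 *	case VMX_RETURN_INVEPT:
 *		(VM-entry or INVEPT failure reported by vmx_launch/vmx_resume;
 *		 cf. the 'launch_error' field above)
 *		break;
 *	}
 */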

u_long	vmx_fix_cr0(u_long cr0);
u_long	vmx_fix_cr4(u_long cr4);
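/*
 * vmx_fix_cr0() and vmx_fix_cr4() adjust a %cr0/%cr4 value so that it is
 * acceptable while in VMX operation.  A sketch of the conventional approach
 * (the MSR constant names are assumptions; the MSR numbers 0x486/0x487 and
 * 0x488/0x489 are from the Intel SDM):
 *
 *	fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);	(bits that must be 1)
 *	fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);	(bits that may be 1)
 *	cr0 = (cr0 | fixed0) & fixed1;
 *
 * and likewise for %cr4 with the CR4_FIXED0/CR4_FIXED1 MSRs.
 */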

#endif