vmx.h: revision 259863 (old) vs. revision 260167 (new)
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden (view full) ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: head/sys/amd64/vmm/intel/vmx.h 259863 2013-12-25 06:46:31Z neel $
+ * $FreeBSD: head/sys/amd64/vmm/intel/vmx.h 260167 2014-01-01 21:17:08Z neel $
 */

#ifndef _VMX_H_
#define _VMX_H_

#include "vmcs.h"

struct pmap;

#define GUEST_MSR_MAX_ENTRIES	64	/* arbitrary */

struct vmxctx {
-	register_t tmpstk[32];		/* vmx_return() stack */
-	register_t tmpstktop;
-
	register_t guest_rdi;		/* Guest state */
	register_t guest_rsi;
	register_t guest_rdx;
	register_t guest_rcx;
	register_t guest_r8;
	register_t guest_r9;
	register_t guest_rax;
	register_t guest_rbx;

--- 13 unchanged lines hidden (view full) ---

	register_t host_rbp;
	register_t host_rsp;
	register_t host_rbx;
	register_t host_rip;
	/*
	 * XXX todo debug registers and fpu state
	 */

-	int launched;			/* vmcs launch state */
-	int launch_error;
+	int inst_fail_status;

	long eptgen[MAXCPU];		/* cached pmap->pm_eptgen */

	/*
	 * The 'eptp' and the 'pmap' do not change during the lifetime of
	 * the VM so it is safe to keep a copy in each vcpu's vmxctx.
	 */
	vm_paddr_t eptp;

--- 27 unchanged lines hidden (view full) ---

	struct vmxstate state[VM_MAXCPU];
	uint64_t eptp;
	struct vm *vm;
};
CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);

-#define VMX_RETURN_DIRECT	0
-#define VMX_RETURN_LONGJMP	1
-#define VMX_RETURN_VMRESUME	2
-#define VMX_RETURN_VMLAUNCH	3
-#define VMX_RETURN_AST		4
-#define VMX_RETURN_INVEPT	5
-/*
- * vmx_setjmp() returns:
- * - 0 when it returns directly
- * - 1 when it returns from vmx_longjmp
- * - 2 when it returns from vmx_resume (which would only be in the error case)
- * - 3 when it returns from vmx_launch (which would only be in the error case)
- * - 4 when it returns from vmx_resume or vmx_launch because of AST pending
- * - 5 when it returns from vmx_launch/vmx_resume because of invept error
- */
-int vmx_setjmp(struct vmxctx *ctx);
-void vmx_longjmp(void);				/* returns via vmx_setjmp */
-void vmx_launch(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
-void vmx_resume(struct vmxctx *ctx) __dead2;	/* may return via vmx_setjmp */
+#define VMX_GUEST_VMEXIT	0
+#define VMX_VMRESUME_ERROR	1
+#define VMX_VMLAUNCH_ERROR	2
+#define VMX_INVEPT_ERROR	3
+int vmx_enter_guest(struct vmxctx *ctx, int launched);
+void vmx_exit_guest(void);

u_long vmx_fix_cr0(u_long cr0);
u_long vmx_fix_cr4(u_long cr4);

#endif
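Note on the new entry API: the setjmp/longjmp-based entry path (vmx_setjmp, vmx_longjmp, vmx_launch, vmx_resume and the VMX_RETURN_* codes) is replaced by a single vmx_enter_guest() call that returns one of the VMX_* status codes added above. A minimal sketch of how a vcpu run loop could consume those codes follows; it is illustrative only, not the vmx_run() code from vmx.c, and the exit-handling and error-return details are assumptions.

```c
#include <sys/param.h>
#include <sys/errno.h>

#include "vmx.h"	/* VMX_* status codes, struct vmxctx, vmx_enter_guest() */

/*
 * Hypothetical vcpu run loop (not the real vmx_run() from vmx.c) showing
 * one way the vmx_enter_guest() return codes could be dispatched.
 */
static int
vmx_run_sketch(struct vmxctx *vmxctx)
{
	int launched = 0;	/* first entry must VMLAUNCH, later ones VMRESUME */
	int rc;

	for (;;) {
		rc = vmx_enter_guest(vmxctx, launched);
		switch (rc) {
		case VMX_GUEST_VMEXIT:
			/* Normal VM-exit: decode the exit reason from the VMCS. */
			launched = 1;
			/* ... handle the exit; return if userspace must act ... */
			break;
		case VMX_VMRESUME_ERROR:
		case VMX_VMLAUNCH_ERROR:
			/*
			 * VMLAUNCH/VMRESUME failed; the VM-instruction error is
			 * presumably recorded in vmxctx->inst_fail_status.
			 */
			return (EINVAL);
		case VMX_INVEPT_ERROR:
			/* invept failed while invalidating stale EPT mappings. */
			return (EIO);
		default:
			return (EINVAL);
		}
	}
}
```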