vmm.h (240912) -> vmm.h (240922). The newer revision adds the x2apic state interface: a forward declaration and definition of enum x2apic_state, plus the vm_get_x2apic_state() and vm_set_x2apic_state() accessors. The full r240922 header follows.

/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: vmm.h 482 2011-05-09 21:22:43Z grehan $
 */

#ifndef _VMM_H_
#define _VMM_H_

#ifdef _KERNEL

#define VM_MAX_NAMELEN  32

struct vm;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vlapic;

enum x2apic_state;

typedef int     (*vmm_init_func_t)(void);
typedef int     (*vmm_cleanup_func_t)(void);
typedef void *  (*vmi_init_func_t)(struct vm *vm); /* instance specific apis */
typedef int     (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip);
typedef void    (*vmi_cleanup_func_t)(void *vmi);
typedef int     (*vmi_mmap_func_t)(void *vmi, vm_paddr_t gpa, vm_paddr_t hpa,
                                   size_t length, vm_memattr_t attr,
                                   int prot, boolean_t superpages_ok);
typedef int     (*vmi_get_register_t)(void *vmi, int vcpu, int num,
                                      uint64_t *retval);
typedef int     (*vmi_set_register_t)(void *vmi, int vcpu, int num,
                                      uint64_t val);
typedef int     (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
                                  struct seg_desc *desc);
typedef int     (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
                                  struct seg_desc *desc);
typedef int     (*vmi_inject_event_t)(void *vmi, int vcpu,
                                      int type, int vector,
                                      uint32_t code, int code_valid);
typedef int     (*vmi_inject_nmi_t)(void *vmi, int vcpu);
typedef int     (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int     (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);

struct vmm_ops {
        vmm_init_func_t         init;           /* module wide initialization */
        vmm_cleanup_func_t      cleanup;

        vmi_init_func_t         vminit;         /* vm-specific initialization */
        vmi_run_func_t          vmrun;
        vmi_cleanup_func_t      vmcleanup;
        vmi_mmap_func_t         vmmmap;
        vmi_get_register_t      vmgetreg;
        vmi_set_register_t      vmsetreg;
        vmi_get_desc_t          vmgetdesc;
        vmi_set_desc_t          vmsetdesc;
        vmi_inject_event_t      vminject;
        vmi_inject_nmi_t        vmnmi;
        vmi_get_cap_t           vmgetcap;
        vmi_set_cap_t           vmsetcap;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

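/*
 * Illustrative sketch (not part of vmm.h): a hypervisor backend is expected
 * to provide a 'struct vmm_ops' whose members match the typedefs above.
 * The backend and function names here ("example_*", "vmm_ops_example") are
 * hypothetical; the real backends are vmm_ops_intel and vmm_ops_amd.
 */
static int
example_init(void)
{
        /* module-wide initialization, e.g. verify CPU virtualization support */
        return (0);
}

static void *
example_vminit(struct vm *vm)
{
        /* allocate and return backend-private per-VM state */
        return (NULL);
}

static int
example_vmrun(void *vmi, int vcpu, register_t rip)
{
        /* enter the guest at 'rip' on 'vcpu'; return 0 on a clean exit */
        return (0);
}

struct vmm_ops vmm_ops_example = {
        .init    = example_init,
        .vminit  = example_vminit,
        .vmrun   = example_vmrun,
        /* remaining members (vmcleanup, vmmmap, vmgetreg, ...) omitted */
};
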
struct vm *vm_create(const char *name);
void vm_destroy(struct vm *vm);
const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
              struct vm_memory_segment *seg);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
                    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
                    struct seg_desc *desc);
int vm_get_pinning(struct vm *vm, int vcpu, int *cpuid);
int vm_set_pinning(struct vm *vm, int vcpu, int cpuid);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_inject_event(struct vm *vm, int vcpu, int type,
                    int vector, uint32_t error_code, int error_code_valid);
int vm_inject_nmi(struct vm *vm, int vcpu);
uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
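
/*
 * Illustrative sketch (not part of vmm.h): querying and changing the
 * per-vcpu x2APIC state added in r240922.  The caller and policy shown here
 * are hypothetical.
 */
static int
example_disable_x2apic(struct vm *vm, int vcpu)
{
        enum x2apic_state state;
        int error;

        error = vm_get_x2apic_state(vm, vcpu, &state);
        if (error)
                return (error);

        if (state != X2APIC_DISABLED)
                error = vm_set_x2apic_state(vm, vcpu, X2APIC_DISABLED);

        return (error);
}
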
void vm_activate_cpu(struct vm *vm, int vcpu);
cpuset_t vm_active_cpus(struct vm *vm);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);

/*
 * Return 1 if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);

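/*
 * Illustrative sketch (not part of vmm.h): a host driver could consult
 * vmm_is_pptdev() before claiming a PCI device.  The helper name and the
 * decision it feeds are hypothetical.
 */
static int
example_should_attach(int bus, int slot, int func)
{
        /* skip devices reserved for PCI passthrough to a guest */
        return (!vmm_is_pptdev(bus, slot, func));
}
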
void *vm_iommu_domain(struct vm *vm);

#define VCPU_STOPPED    0
#define VCPU_RUNNING    1
void vm_set_run_state(struct vm *vm, int vcpu, int running);
int vm_get_run_state(struct vm *vm, int vcpu, int *hostcpu);

void *vcpu_stats(struct vm *vm, int vcpu);

static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
        return (vm_get_run_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

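/*
 * Illustrative sketch (not part of vmm.h): using vcpu_is_running() to find
 * the physical cpu a vcpu is currently executing on, e.g. before sending it
 * an IPI.  The helper name is hypothetical.
 */
static int
example_vcpu_host_cpu(struct vm *vm, int vcpu)
{
        int hostcpu;

        if (vcpu_is_running(vm, vcpu, &hostcpu))
                return (hostcpu);       /* physical cpu it is running on */

        return (-1);                    /* VCPU_STOPPED */
}
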
#endif  /* _KERNEL */

#define VM_MAXCPU       8               /* maximum virtual cpus */

/*
 * Identifiers for events that can be injected into the VM
 */
enum vm_event_type {
        VM_EVENT_NONE,
        VM_HW_INTR,
        VM_NMI,
        VM_HW_EXCEPTION,
        VM_SW_INTR,
        VM_PRIV_SW_EXCEPTION,
        VM_SW_EXCEPTION,
        VM_EVENT_MAX
};

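/*
 * Illustrative sketch (not part of vmm.h): injecting a hardware interrupt
 * into a vcpu with the kernel-side vm_inject_event() declared earlier under
 * _KERNEL.  The vector (32) is a hypothetical choice for the example.
 */
static int
example_assert_irq(struct vm *vm, int vcpu)
{
        /* VM_HW_INTR carries no error code, so error_code_valid is 0 */
        return (vm_inject_event(vm, vcpu, VM_HW_INTR, 32, 0, 0));
}
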
/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
        VM_REG_GUEST_RAX,
        VM_REG_GUEST_RBX,
        VM_REG_GUEST_RCX,
        VM_REG_GUEST_RDX,
        VM_REG_GUEST_RSI,
        VM_REG_GUEST_RDI,
        VM_REG_GUEST_RBP,
        VM_REG_GUEST_R8,
        VM_REG_GUEST_R9,
        VM_REG_GUEST_R10,
        VM_REG_GUEST_R11,
        VM_REG_GUEST_R12,
        VM_REG_GUEST_R13,
        VM_REG_GUEST_R14,
        VM_REG_GUEST_R15,
        VM_REG_GUEST_CR0,
        VM_REG_GUEST_CR3,
        VM_REG_GUEST_CR4,
        VM_REG_GUEST_DR7,
        VM_REG_GUEST_RSP,
        VM_REG_GUEST_RIP,
        VM_REG_GUEST_RFLAGS,
        VM_REG_GUEST_ES,
        VM_REG_GUEST_CS,
        VM_REG_GUEST_SS,
        VM_REG_GUEST_DS,
        VM_REG_GUEST_FS,
        VM_REG_GUEST_GS,
        VM_REG_GUEST_LDTR,
        VM_REG_GUEST_TR,
        VM_REG_GUEST_IDTR,
        VM_REG_GUEST_GDTR,
        VM_REG_GUEST_EFER,
        VM_REG_LAST
};

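/*
 * Illustrative sketch (not part of vmm.h): reading a guest register with the
 * kernel-side vm_get_register() declared earlier.  The helper name is
 * hypothetical.
 */
static int
example_read_guest_rip(struct vm *vm, int vcpu, uint64_t *rip)
{
        return (vm_get_register(vm, vcpu, VM_REG_GUEST_RIP, rip));
}
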
/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
        VM_CAP_HALT_EXIT,
        VM_CAP_MTRAP_EXIT,
        VM_CAP_PAUSE_EXIT,
        VM_CAP_UNRESTRICTED_GUEST,
        VM_CAP_MAX
};

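/*
 * Illustrative sketch (not part of vmm.h): enabling an optional capability,
 * here forcing a VM-exit on guest HLT, with the kernel-side
 * vm_set_capability() declared earlier.
 */
static int
example_enable_hlt_exit(struct vm *vm, int vcpu)
{
        return (vm_set_capability(vm, vcpu, VM_CAP_HALT_EXIT, 1));
}
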
enum x2apic_state {
        X2APIC_ENABLED,
        X2APIC_AVAILABLE,
        X2APIC_DISABLED,
        X2APIC_STATE_LAST
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
        uint64_t        base;
        uint32_t        limit;
        uint32_t        access;
};

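/*
 * Illustrative sketch (not part of vmm.h): installing a flat 32-bit code
 * segment for a guest with the kernel-side vm_set_seg_desc().  The access
 * value 0x0000c09b (present, DPL 0, code segment, 32-bit, 4K granularity) is
 * an assumption about the Table 21-2 encoding, shown only for the example.
 */
static int
example_set_flat_cs(struct vm *vm, int vcpu)
{
        struct seg_desc desc;

        desc.base = 0;
        desc.limit = 0xffffffff;
        desc.access = 0x0000c09b;

        return (vm_set_seg_desc(vm, vcpu, VM_REG_GUEST_CS, &desc));
}
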
enum vm_exitcode {
        VM_EXITCODE_INOUT,
        VM_EXITCODE_VMX,
        VM_EXITCODE_BOGUS,
        VM_EXITCODE_RDMSR,
        VM_EXITCODE_WRMSR,
        VM_EXITCODE_HLT,
        VM_EXITCODE_MTRAP,
        VM_EXITCODE_PAUSE,
        VM_EXITCODE_PAGING,
        VM_EXITCODE_SPINUP_AP,
        VM_EXITCODE_MAX
};

struct vm_exit {
        enum vm_exitcode        exitcode;
        int                     inst_length;    /* 0 means unknown */
        uint64_t                rip;
        union {
                struct {
                        uint16_t        bytes:3;        /* 1 or 2 or 4 */
                        uint16_t        in:1;           /* out is 0, in is 1 */
                        uint16_t        string:1;
                        uint16_t        rep:1;
                        uint16_t        port;
                        uint32_t        eax;            /* valid for out */
                } inout;
                struct {
                        uint64_t        cr3;
                } paging;
                /*
                 * VMX specific payload. Used when there is no "better"
                 * exitcode to represent the VM-exit.
                 */
                struct {
                        int             error;          /* vmx inst error */
                        uint32_t        exit_reason;
                        uint64_t        exit_qualification;
                } vmx;
                struct {
                        uint32_t        code;           /* ecx value */
                        uint64_t        wval;
                } msr;
                struct {
                        int             vcpu;
                        uint64_t        rip;
                } spinup_ap;
        } u;
};

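/*
 * Illustrative sketch (not part of vmm.h): dispatching on a struct vm_exit,
 * as a consumer of the exit information might after a run returns.  Only the
 * fields defined above are used; the handler itself is hypothetical.
 */
static int
example_handle_exit(struct vm_exit *vme)
{
        switch (vme->exitcode) {
        case VM_EXITCODE_INOUT:
                /* vme->u.inout.port, .bytes, .in and .eax describe the access */
                return (0);
        case VM_EXITCODE_RDMSR:
        case VM_EXITCODE_WRMSR:
                /* vme->u.msr.code holds the MSR number (guest %ecx) */
                return (0);
        case VM_EXITCODE_PAGING:
                /* vme->u.paging.cr3 is the guest page table base */
                return (0);
        default:
                return (-1);
        }
}
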
#endif  /* _VMM_H_ */