/* vmm.h revision 240912 */
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: vmm.h 482 2011-05-09 21:22:43Z grehan $
 */
28
#ifndef _VMM_H_
#define	_VMM_H_
31
#ifdef _KERNEL

/* Maximum length of a virtual machine name. */
#define	VM_MAX_NAMELEN	32

/* Forward declarations; the definitions are private to the vmm code. */
struct vm;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vlapic;

/* Module-wide initialization and teardown of a hypervisor backend. */
typedef int	(*vmm_init_func_t)(void);
typedef int	(*vmm_cleanup_func_t)(void);

/*
 * Per-VM operations.  vmi_init_func_t returns an opaque per-instance
 * cookie that is passed back as 'vmi' to all of the other operations.
 */
typedef void *	(*vmi_init_func_t)(struct vm *vm); /* instance specific apis */
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
/* Map guest physical address 'gpa' to host physical address 'hpa'. */
typedef int	(*vmi_mmap_func_t)(void *vmi, vm_paddr_t gpa, vm_paddr_t hpa,
				   size_t length, vm_memattr_t attr,
				   int prot, boolean_t superpages_ok);
/* Guest register accessors; 'num' is a VM_REG_GUEST_* identifier. */
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
				      uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
				      uint64_t val);
/* Segment descriptor accessors for the segment/table registers. */
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
				  struct seg_desc *desc);
/* Event injection; 'type' is an enum vm_event_type value. */
typedef int	(*vmi_inject_event_t)(void *vmi, int vcpu,
				      int type, int vector,
				      uint32_t code, int code_valid);
typedef	int	(*vmi_inject_nmi_t)(void *vmi, int vcpu);
/* Optional capability accessors; 'num' is an enum vm_cap_type value. */
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
65
/*
 * Table of operations supplied by a hypervisor backend (see vmm_ops_intel
 * and vmm_ops_amd below) to the hardware-independent vmm layer.
 */
struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;	/* module wide teardown */

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;		/* enter guest context on a vcpu */
	vmi_cleanup_func_t	vmcleanup;	/* vm-specific teardown */
	vmi_mmap_func_t		vmmmap;		/* map gpa to hpa */
	vmi_get_register_t	vmgetreg;	/* read a guest register */
	vmi_set_register_t	vmsetreg;	/* write a guest register */
	vmi_get_desc_t		vmgetdesc;	/* read a segment descriptor */
	vmi_set_desc_t		vmsetdesc;	/* write a segment descriptor */
	vmi_inject_event_t	vminject;	/* inject an event into a vcpu */
	vmi_inject_nmi_t	vmnmi;		/* inject an NMI into a vcpu */
	vmi_get_cap_t		vmgetcap;	/* query an optional capability */
	vmi_set_cap_t		vmsetcap;	/* set an optional capability */
};
83
/* Backend operation tables for the Intel and AMD implementations. */
extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

/* VM lifecycle. */
struct vm *vm_create(const char *name);
void vm_destroy(struct vm *vm);
const char *vm_name(struct vm *vm);

/* Guest physical address space: allocation, MMIO mappings, translation. */
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
vm_paddr_t vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t size);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
	      struct vm_memory_segment *seg);

/*
 * Per-vcpu register and segment descriptor accessors; 'reg' is an
 * enum vm_reg_name value.
 */
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		    struct seg_desc *desc);

/* Pin a vcpu to a host cpu ('cpuid'), or query the current pinning. */
int vm_get_pinning(struct vm *vm, int vcpu, int *cpuid);
int vm_set_pinning(struct vm *vm, int vcpu, int cpuid);

int vm_run(struct vm *vm, struct vm_run *vmrun);

/* Event/NMI injection; 'type' is an enum vm_event_type value. */
int vm_inject_event(struct vm *vm, int vcpu, int type,
		    int vector, uint32_t error_code, int error_code_valid);
int vm_inject_nmi(struct vm *vm, int vcpu);

uint64_t *vm_guest_msrs(struct vm *vm, int cpu);
struct vlapic *vm_lapic(struct vm *vm, int cpu);

/* Optional capability accessors; 'type' is an enum vm_cap_type value. */
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);

void vm_activate_cpu(struct vm *vm, int vcpu);
cpuset_t vm_active_cpus(struct vm *vm);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);

/*
 * Return 1 if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

/* vcpu run states used with vm_set_run_state()/vm_get_run_state(). */
#define	VCPU_STOPPED	0
#define	VCPU_RUNNING	1
void vm_set_run_state(struct vm *vm, int vcpu, int running);
int vm_get_run_state(struct vm *vm, int vcpu, int *hostcpu);

void *vcpu_stats(struct vm *vm, int vcpu);
132
133static int __inline
134vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
135{
136	return (vm_get_run_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
137}
138
#endif	/* _KERNEL */
140
#define	VM_MAXCPU	8			/* maximum virtual cpus */

/*
 * Identifiers for events that can be injected into the VM with
 * vm_inject_event().  The types mirror the hardware event-injection
 * classes (see the Intel SDM description of VM-entry event injection).
 */
enum vm_event_type {
	VM_EVENT_NONE,			/* no event */
	VM_HW_INTR,			/* hardware (external) interrupt */
	VM_NMI,				/* non-maskable interrupt */
	VM_HW_EXCEPTION,		/* hardware exception */
	VM_SW_INTR,			/* software interrupt */
	VM_PRIV_SW_EXCEPTION,		/* privileged software exception */
	VM_SW_EXCEPTION,		/* software exception */
	VM_EVENT_MAX			/* number of event types */
};
156
/*
 * Identifiers for architecturally defined registers.  Used as the 'reg'
 * argument to vm_get_register()/vm_set_register() and, for the segment
 * and table registers, to vm_get_seg_desc()/vm_set_seg_desc().
 */
enum vm_reg_name {
	/* general purpose registers */
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	/* control and debug registers */
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	/* stack pointer, instruction pointer and flags */
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	/* segment and descriptor-table registers */
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_LAST			/* number of register identifiers */
};
196
/*
 * Identifiers for optional vmm capabilities, queried and toggled per-vcpu
 * with vm_get_capability()/vm_set_capability().
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,		/* VM-exit on HLT */
	VM_CAP_MTRAP_EXIT,		/* VM-exit via monitor trap flag */
	VM_CAP_PAUSE_EXIT,		/* VM-exit on PAUSE */
	VM_CAP_UNRESTRICTED_GUEST,	/* VT-x "unrestricted guest" mode */
	VM_CAP_MAX			/* number of capabilities */
};
207
/*
 * A segment descriptor as exposed through vm_get_seg_desc() and
 * vm_set_seg_desc().
 *
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;		/* segment base address */
	uint32_t	limit;		/* segment limit */
	uint32_t	access;		/* access rights (see above) */
};
220
/*
 * Reason codes for a VM-exit, reported in the 'exitcode' field of
 * struct vm_exit.  The comments name the union member of struct vm_exit
 * that carries the exit-specific payload, where one exists.
 */
enum vm_exitcode {
	VM_EXITCODE_INOUT,		/* I/O port access; u.inout */
	VM_EXITCODE_VMX,		/* unclassified VMX exit; u.vmx */
	VM_EXITCODE_BOGUS,		/* spurious exit; no payload */
	VM_EXITCODE_RDMSR,		/* MSR read; u.msr */
	VM_EXITCODE_WRMSR,		/* MSR write; u.msr */
	VM_EXITCODE_HLT,		/* guest executed HLT */
	VM_EXITCODE_MTRAP,		/* monitor trap flag exit */
	VM_EXITCODE_PAUSE,		/* guest executed PAUSE */
	VM_EXITCODE_PAGING,		/* guest paging event; u.paging */
	VM_EXITCODE_SPINUP_AP,		/* AP startup request; u.spinup_ap */
	VM_EXITCODE_MAX			/* number of exit codes */
};
234
/*
 * Information describing a VM-exit, retrieved with vm_exitinfo().
 * 'exitcode' selects which member of the union 'u' is valid.
 */
struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;		/* guest rip for this exit */
	union {
		/* VM_EXITCODE_INOUT */
		struct {
			uint16_t	bytes:3;	/* 1 or 2 or 4 */
			uint16_t	in:1;		/* out is 0, in is 1 */
			uint16_t	string:1;	/* string variant (e.g. INS/OUTS) */
			uint16_t	rep:1;		/* REP prefixed */
			uint16_t	port;		/* I/O port number */
			uint32_t	eax;		/* valid for out */
		} inout;
		/* VM_EXITCODE_PAGING */
		struct {
			uint64_t	cr3;		/* guest cr3 at the exit */
		} paging;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		error;		/* vmx inst error */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
		} vmx;
		/* VM_EXITCODE_RDMSR / VM_EXITCODE_WRMSR */
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;		/* value written (wrmsr) */
		} msr;
		/* VM_EXITCODE_SPINUP_AP */
		struct {
			int		vcpu;		/* vcpu being started */
			uint64_t	rip;		/* rip to start it at */
		} spinup_ap;
	} u;
};
270
#endif	/* _VMM_H_ */
272