/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25219820Sjeff * 26219820Sjeff * $FreeBSD: head/sys/amd64/include/vmm.h 256072 2013-10-05 21:22:35Z neel $ 27219820Sjeff */ 28219820Sjeff 29219820Sjeff#ifndef _VMM_H_ 30219820Sjeff#define _VMM_H_ 31219820Sjeff 32219820Sjeff#ifdef _KERNEL 33219820Sjeff 34219820Sjeff#define VM_MAX_NAMELEN 32 35219820Sjeff 36219820Sjeffstruct vm; 37219820Sjeffstruct vm_memory_segment; 38219820Sjeffstruct seg_desc; 39219820Sjeffstruct vm_exit; 40219820Sjeffstruct vm_run; 41219820Sjeffstruct vlapic; 42219820Sjeffstruct vmspace; 43219820Sjeffstruct vm_object; 44219820Sjeffstruct pmap; 45219820Sjeff 46219820Sjeffenum x2apic_state; 47219820Sjeff 48219820Sjefftypedef int (*vmm_init_func_t)(void); 49219820Sjefftypedef int (*vmm_cleanup_func_t)(void); 50219820Sjefftypedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap); 51219820Sjefftypedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip, 52219820Sjeff struct pmap *pmap); 53219820Sjefftypedef void (*vmi_cleanup_func_t)(void *vmi); 54219820Sjefftypedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num, 55219820Sjeff uint64_t *retval); 56219820Sjefftypedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num, 57219820Sjeff uint64_t val); 58219820Sjefftypedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num, 59219820Sjeff struct seg_desc *desc); 60219820Sjefftypedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num, 61219820Sjeff struct seg_desc *desc); 62219820Sjefftypedef int (*vmi_inject_event_t)(void *vmi, int vcpu, 63219820Sjeff int type, int vector, 64219820Sjeff uint32_t code, int code_valid); 65219820Sjefftypedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval); 66219820Sjefftypedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val); 67219820Sjefftypedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max); 68219820Sjefftypedef void (*vmi_vmspace_free)(struct vmspace *vmspace); 69219820Sjeff 70219820Sjeffstruct vmm_ops { 71219820Sjeff vmm_init_func_t init; /* 
module wide initialization */ 72219820Sjeff vmm_cleanup_func_t cleanup; 73219820Sjeff 74219820Sjeff vmi_init_func_t vminit; /* vm-specific initialization */ 75219820Sjeff vmi_run_func_t vmrun; 76219820Sjeff vmi_cleanup_func_t vmcleanup; 77219820Sjeff vmi_get_register_t vmgetreg; 78219820Sjeff vmi_set_register_t vmsetreg; 79219820Sjeff vmi_get_desc_t vmgetdesc; 80219820Sjeff vmi_set_desc_t vmsetdesc; 81219820Sjeff vmi_inject_event_t vminject; 82219820Sjeff vmi_get_cap_t vmgetcap; 83219820Sjeff vmi_set_cap_t vmsetcap; 84219820Sjeff vmi_vmspace_alloc vmspace_alloc; 85219820Sjeff vmi_vmspace_free vmspace_free; 86219820Sjeff}; 87219820Sjeff 88219820Sjeffextern struct vmm_ops vmm_ops_intel; 89219820Sjeffextern struct vmm_ops vmm_ops_amd; 90219820Sjeff 91219820Sjeffint vm_create(const char *name, struct vm **retvm); 92219820Sjeffvoid vm_destroy(struct vm *vm); 93219820Sjeffconst char *vm_name(struct vm *vm); 94219820Sjeffint vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len); 95219820Sjeffint vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa); 96219820Sjeffint vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len); 97219820Sjeffvoid *vm_gpa_hold(struct vm *, vm_paddr_t gpa, size_t len, int prot, 98219820Sjeff void **cookie); 99219820Sjeffvoid vm_gpa_release(void *cookie); 100219820Sjeffint vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase, 101219820Sjeff struct vm_memory_segment *seg); 102int vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len, 103 vm_offset_t *offset, struct vm_object **object); 104boolean_t vm_mem_allocated(struct vm *vm, vm_paddr_t gpa); 105int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval); 106int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val); 107int vm_get_seg_desc(struct vm *vm, int vcpu, int reg, 108 struct seg_desc *ret_desc); 109int vm_set_seg_desc(struct vm *vm, int vcpu, int reg, 110 struct seg_desc *desc); 111int vm_run(struct vm *vm, struct vm_run *vmrun); 112int 
vm_inject_event(struct vm *vm, int vcpu, int type, 113 int vector, uint32_t error_code, int error_code_valid); 114int vm_inject_nmi(struct vm *vm, int vcpu); 115int vm_nmi_pending(struct vm *vm, int vcpuid); 116void vm_nmi_clear(struct vm *vm, int vcpuid); 117uint64_t *vm_guest_msrs(struct vm *vm, int cpu); 118struct vlapic *vm_lapic(struct vm *vm, int cpu); 119int vm_get_capability(struct vm *vm, int vcpu, int type, int *val); 120int vm_set_capability(struct vm *vm, int vcpu, int type, int val); 121int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state); 122int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state); 123void vm_activate_cpu(struct vm *vm, int vcpu); 124cpuset_t vm_active_cpus(struct vm *vm); 125struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid); 126 127/* 128 * Return 1 if device indicated by bus/slot/func is supposed to be a 129 * pci passthrough device. 130 * 131 * Return 0 otherwise. 132 */ 133int vmm_is_pptdev(int bus, int slot, int func); 134 135void *vm_iommu_domain(struct vm *vm); 136 137enum vcpu_state { 138 VCPU_IDLE, 139 VCPU_FROZEN, 140 VCPU_RUNNING, 141 VCPU_SLEEPING, 142}; 143 144int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state); 145enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu); 146 147static int __inline 148vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu) 149{ 150 return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING); 151} 152 153void *vcpu_stats(struct vm *vm, int vcpu); 154void vm_interrupt_hostcpu(struct vm *vm, int vcpu); 155struct vmspace *vm_get_vmspace(struct vm *vm); 156int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func); 157int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func); 158#endif /* KERNEL */ 159 160#include <machine/vmm_instruction_emul.h> 161 162#define VM_MAXCPU 16 /* maximum virtual cpus */ 163 164/* 165 * Identifiers for events that can be injected into the VM 166 */ 167enum vm_event_type { 168 
VM_EVENT_NONE, 169 VM_HW_INTR, 170 VM_NMI, 171 VM_HW_EXCEPTION, 172 VM_SW_INTR, 173 VM_PRIV_SW_EXCEPTION, 174 VM_SW_EXCEPTION, 175 VM_EVENT_MAX 176}; 177 178/* 179 * Identifiers for architecturally defined registers. 180 */ 181enum vm_reg_name { 182 VM_REG_GUEST_RAX, 183 VM_REG_GUEST_RBX, 184 VM_REG_GUEST_RCX, 185 VM_REG_GUEST_RDX, 186 VM_REG_GUEST_RSI, 187 VM_REG_GUEST_RDI, 188 VM_REG_GUEST_RBP, 189 VM_REG_GUEST_R8, 190 VM_REG_GUEST_R9, 191 VM_REG_GUEST_R10, 192 VM_REG_GUEST_R11, 193 VM_REG_GUEST_R12, 194 VM_REG_GUEST_R13, 195 VM_REG_GUEST_R14, 196 VM_REG_GUEST_R15, 197 VM_REG_GUEST_CR0, 198 VM_REG_GUEST_CR3, 199 VM_REG_GUEST_CR4, 200 VM_REG_GUEST_DR7, 201 VM_REG_GUEST_RSP, 202 VM_REG_GUEST_RIP, 203 VM_REG_GUEST_RFLAGS, 204 VM_REG_GUEST_ES, 205 VM_REG_GUEST_CS, 206 VM_REG_GUEST_SS, 207 VM_REG_GUEST_DS, 208 VM_REG_GUEST_FS, 209 VM_REG_GUEST_GS, 210 VM_REG_GUEST_LDTR, 211 VM_REG_GUEST_TR, 212 VM_REG_GUEST_IDTR, 213 VM_REG_GUEST_GDTR, 214 VM_REG_GUEST_EFER, 215 VM_REG_LAST 216}; 217 218/* 219 * Identifiers for optional vmm capabilities 220 */ 221enum vm_cap_type { 222 VM_CAP_HALT_EXIT, 223 VM_CAP_MTRAP_EXIT, 224 VM_CAP_PAUSE_EXIT, 225 VM_CAP_UNRESTRICTED_GUEST, 226 VM_CAP_MAX 227}; 228 229enum x2apic_state { 230 X2APIC_ENABLED, 231 X2APIC_AVAILABLE, 232 X2APIC_DISABLED, 233 X2APIC_STATE_LAST 234}; 235 236/* 237 * The 'access' field has the format specified in Table 21-2 of the Intel 238 * Architecture Manual vol 3b. 239 * 240 * XXX The contents of the 'access' field are architecturally defined except 241 * bit 16 - Segment Unusable. 
242 */ 243struct seg_desc { 244 uint64_t base; 245 uint32_t limit; 246 uint32_t access; 247}; 248 249enum vm_exitcode { 250 VM_EXITCODE_INOUT, 251 VM_EXITCODE_VMX, 252 VM_EXITCODE_BOGUS, 253 VM_EXITCODE_RDMSR, 254 VM_EXITCODE_WRMSR, 255 VM_EXITCODE_HLT, 256 VM_EXITCODE_MTRAP, 257 VM_EXITCODE_PAUSE, 258 VM_EXITCODE_PAGING, 259 VM_EXITCODE_INST_EMUL, 260 VM_EXITCODE_SPINUP_AP, 261 VM_EXITCODE_MAX 262}; 263 264struct vm_exit { 265 enum vm_exitcode exitcode; 266 int inst_length; /* 0 means unknown */ 267 uint64_t rip; 268 union { 269 struct { 270 uint16_t bytes:3; /* 1 or 2 or 4 */ 271 uint16_t in:1; /* out is 0, in is 1 */ 272 uint16_t string:1; 273 uint16_t rep:1; 274 uint16_t port; 275 uint32_t eax; /* valid for out */ 276 } inout; 277 struct { 278 uint64_t gpa; 279 int fault_type; 280 int protection; 281 } paging; 282 struct { 283 uint64_t gpa; 284 uint64_t gla; 285 uint64_t cr3; 286 struct vie vie; 287 } inst_emul; 288 /* 289 * VMX specific payload. Used when there is no "better" 290 * exitcode to represent the VM-exit. 291 */ 292 struct { 293 int error; /* vmx inst error */ 294 uint32_t exit_reason; 295 uint64_t exit_qualification; 296 } vmx; 297 struct { 298 uint32_t code; /* ecx value */ 299 uint64_t wval; 300 } msr; 301 struct { 302 int vcpu; 303 uint64_t rip; 304 } spinup_ap; 305 } u; 306}; 307 308#endif /* _VMM_H_ */ 309