vmm.h revision 282287
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/include/vmm.h 282287 2015-04-30 22:23:22Z neel $
 */

#ifndef _VMM_H_
#define	_VMM_H_

#include <x86/segments.h>

enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};

/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};

#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)
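
/*
 * Illustrative sketch, not part of the original header: composing and
 * decoding an event descriptor with the VM_INTINFO_* bits above.  The
 * vector occupies bits 7:0, the event type bits 10:8, bit 11 says an
 * error code is delivered, and bit 31 marks the descriptor as valid.
 */
#if 0
	uint64_t intinfo;

	intinfo = IDT_GP |			/* vector 13 (#GP) */
	    VM_INTINFO_HWEXCEPTION |		/* type: hardware exception */
	    VM_INTINFO_DEL_ERRCODE |		/* an error code is delivered */
	    VM_INTINFO_VALID;			/* descriptor is valid/pending */

	/* VM_INTINFO_VECTOR(intinfo) recovers IDT_GP. */
#endif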

#ifdef _KERNEL

#define	VM_MAX_NAMELEN	32

struct vm;
struct vm_exception;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;

typedef int	(*vmm_init_func_t)(int ipinum);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
		    struct pmap *pmap, void *rendezvous_cookie,
		    void *suspend_cookie);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
		    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
		    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
		    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
		    struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void	(*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);

struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vmspace_alloc	vmspace_alloc;
	vmi_vmspace_free	vmspace_free;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
};

extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;

int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
void *vm_gpa_hold(struct vm *, vm_paddr_t gpa, size_t len, int prot,
    void **cookie);
void vm_gpa_release(void *cookie);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg);
int vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object);
boolean_t vm_mem_allocated(struct vm *vm, vm_paddr_t gpa);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);

#ifdef _SYS__CPUSET_H_
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * If the rendezvous is being initiated from a vcpu context then the
 * 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
void vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
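
/*
 * Usage sketch, not part of the original header: a rendezvous callback and
 * its invocation.  'nop_rendezvous' and 'example_sync' are hypothetical
 * names used only for illustration; the callback runs on every vcpu in
 * 'dest' and must not sleep or take sleepable locks.
 */
#if 0
static void
nop_rendezvous(struct vm *vm, int vcpuid, void *arg)
{
	/* Per-vcpu work goes here; the vcpu is quiesced while this runs. */
}

static void
example_sync(struct vm *vm)
{
	cpuset_t dest;

	dest = vm_active_cpus(vm);
	/* Initiated from a non-vcpu context, so 'vcpuid' is -1. */
	vm_smp_rendezvous(vm, -1, dest, nop_rendezvous, NULL);
}
#endif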

#endif	/* _SYS__CPUSET_H_ */

static __inline int
vcpu_rendezvous_pending(void *rendezvous_cookie)
{

	return (*(uintptr_t *)rendezvous_cookie != 0);
}

static __inline int
vcpu_suspended(void *suspend_cookie)
{

	return (*(int *)suspend_cookie);
}

/*
 * Return 1 if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);

void *vm_iommu_domain(struct vm *vm);

enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);

static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{
	return (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED));
}
#endif

void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);

/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);
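
/*
 * Usage sketch, not part of the original header: injecting #GP(0) from
 * emulation code running in the vcpu's own thread context.  The wrapper
 * defined near the end of this file is preferred; the direct call below
 * spells out the individual arguments for a fault-like exception.
 */
#if 0
	/* Preferred: the wrapper picks the fault-like semantics for #GP. */
	vm_inject_gp(vm, vcpuid);

	/*
	 * Direct call: vector IDT_GP, an error code of 0 is delivered and
	 * the faulting instruction is restarted.
	 */
	vm_inject_exception(vm, vcpuid, IDT_GP, 1, 0, 1);
#endif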

/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For e.g.,
 * if the task switch emulation is triggered via a task gate then it should
 * call this function with 'intinfo=0' to indicate that the external event
 * is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);

/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);

enum vm_reg_name vm_segment_name(int seg_encoding);

struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
 * a copyin or PROT_WRITE for a copyout.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
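
/*
 * Usage sketch, not part of the original header: copying 'len' bytes from
 * guest linear address 'gla' into a kernel buffer 'kaddr'.  'COPY_FRAGS'
 * is an illustrative fragment count and 'vm', 'vcpuid', 'paging', 'gla',
 * 'len' and 'kaddr' are assumed to be supplied by the caller.
 */
#if 0
#define	COPY_FRAGS	2
	struct vm_copyinfo copyinfo[COPY_FRAGS];
	int error;

	error = vm_copy_setup(vm, vcpuid, &paging, gla, len, PROT_READ,
	    copyinfo, COPY_FRAGS);
	if (error == 0) {
		/* The mappings are valid only until vm_copy_teardown(). */
		vm_copyin(vm, vcpuid, copyinfo, kaddr, len);
		vm_copy_teardown(vm, vcpuid, copyinfo, COPY_FRAGS);
	} else if (error == 1) {
		/* An exception was injected; let the guest run to deliver it. */
	} else {
		/* error == -1: the copy could not be set up. */
	}
#endif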

int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif	/* KERNEL */

#define	VM_MAXCPU	16			/* maximum virtual cpus */

/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_MAX
};

enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};

/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
#define	SEG_DESC_TYPE(access)		((access) & 0x001f)
#define	SEG_DESC_DPL(access)		(((access) >> 5) & 0x3)
#define	SEG_DESC_PRESENT(access)	(((access) & 0x0080) ? 1 : 0)
#define	SEG_DESC_DEF32(access)		(((access) & 0x4000) ? 1 : 0)
#define	SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
#define	SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
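
/*
 * Illustrative sketch, not part of the original header: decoding the
 * 'access' field of a descriptor obtained with the in-kernel
 * vm_get_seg_desc() accessor declared above.
 */
#if 0
	struct seg_desc desc;
	int dpl, def32;

	if (vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_CS, &desc) == 0 &&
	    SEG_DESC_PRESENT(desc.access) && !SEG_DESC_UNUSABLE(desc.access)) {
		dpl = SEG_DESC_DPL(desc.access);	/* privilege level 0-3 */
		def32 = SEG_DESC_DEF32(desc.access);	/* default operand size */
	}
#endif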

enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
};

struct vm_guest_paging {
	uint64_t	cr3;
	int		cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */
	uint8_t		num_processed;

	uint8_t		addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1,
			repz_present:1,		/* REP/REPE/REPZ prefix */
			repnz_present:1,	/* REPNE/REPNZ prefix */
			opsize_override:1,	/* Operand size override */
			addrsize_override:1,	/* Address size override */
			segment_override:1;	/* Segment override */

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			index:4,
			base:4;

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;
	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */
	int		segment_register;	/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	struct vie_op	op;			/* opcode description */
};

enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_MAX
};

struct vm_inout {
	uint16_t	bytes:3;	/* 1 or 2 or 4 */
	uint16_t	in:1;
	uint16_t	string:1;
	uint16_t	rep:1;
	uint16_t	port;
	uint32_t	eax;		/* valid for out */
};

struct vm_inout_str {
	struct vm_inout	inout;		/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t	rflags;
	uint64_t	cr0;
	uint64_t	index;
	uint64_t	count;		/* rep=1 (%rcx), rep=0 (1) */
	int		addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};

enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};

struct vm_task_switch {
	uint16_t	tsssel;		/* new TSS selector */
	int		ext;		/* task switch due to external event */
	uint32_t	errcode;
	int		errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};

struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct vm_inout	inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t	gpa;
			int		fault_type;
		} paging;
		struct {
			uint64_t	gpa;
			uint64_t	gla;
			uint64_t	cs_base;
			int		cs_d;		/* CS.D */
			struct vm_guest_paging paging;
			struct vie	vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		status;		/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int		inst_type;
			int		inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t	exitcode;
			uint64_t	exitinfo1;
			uint64_t	exitinfo2;
		} svm;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
		struct {
			uint64_t	rflags;
		} hlt;
		struct {
			int		vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct vm_task_switch task_switch;
	} u;
};

/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}

void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);

int vm_restart_instruction(void *vm, int vcpuid);

#endif	/* _VMM_H_ */
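
/*
 * Usage sketch, not part of the original header: how a consumer of this
 * header might inspect 'struct vm_exit' after a vcpu stops running.  The
 * function name 'handle_exit' is hypothetical.
 */
#if 0
static void
handle_exit(struct vm_exit *vme)
{
	switch (vme->exitcode) {
	case VM_EXITCODE_INOUT:
		if (vme->u.inout.in) {
			/* Emulate an 'in' of u.inout.bytes bytes from u.inout.port. */
		} else {
			/* Emulate an 'out' of u.inout.eax to u.inout.port. */
		}
		break;
	case VM_EXITCODE_SUSPENDED:
		/* u.suspended.how says why the virtual machine stopped. */
		break;
	default:
		break;
	}
}
#endif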