/* vmm_instruction_emul.h — FreeBSD SVN revision 266595 */
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/include/vmm_instruction_emul.h 266595 2014-05-23 19:59:14Z neel $
 */

#ifndef	_VMM_INSTRUCTION_EMUL_H_
#define	_VMM_INSTRUCTION_EMUL_H_

enum vm_reg_name;

enum vie_cpu_mode {
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

enum vie_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
};

/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */
	uint8_t		num_processed;

	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1;

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			index:4,
			base:4;

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;
	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	struct vie_op	op;			/* opcode description */
};

/*
 * Callback functions to read and write memory regions.
 */
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
				 uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
				  uint64_t wval, int wsize, void *arg);

/*
 * Emulate the decoded 'vie' instruction.
 *
 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
 * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
 * callback functions.
 *
 * 'void *vm' should be 'struct vm *' when called from kernel context and
 * 'struct vmctx *' when called from user context.
 */
int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
			    mem_region_read_t mrr, mem_region_write_t mrw,
			    void *mrarg);

int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
			uint64_t val, int size);

/*
 * Returns 1 if an alignment check exception should be injected and 0 otherwise.
 */
int vie_alignment_check(int cpl, int operand_size, uint64_t cr0,
			uint64_t rflags, uint64_t gla);

uint64_t vie_size2mask(int size);

#ifdef _KERNEL
/*
 * APIs to fetch and decode the instruction from nested page fault handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'
 */
int vmm_fetch_instruction(struct vm *vm, int cpuid,
			  uint64_t rip, int inst_length, uint64_t cr3,
			  enum vie_paging_mode paging_mode, int cpl,
			  struct vie *vie);

/*
 * Translate the guest linear address 'gla' to a guest physical address.
 *
 * Returns 0 on success and '*gpa' contains the result of the translation.
 * Returns 1 if a page fault exception was injected into the guest.
 * Returns -1 otherwise.
 */
int vmm_gla2gpa(struct vm *vm, int vcpuid, uint64_t gla, uint64_t cr3,
		uint64_t *gpa, enum vie_paging_mode paging_mode, int cpl,
		int prot);

void vie_init(struct vie *vie);

uint64_t vie_segbase(enum vm_reg_name segment, enum vie_cpu_mode cpu_mode,
		     const struct seg_desc *desc);

/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason pass
 * in VIE_INVALID_GLA instead.
 */
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
			   enum vie_cpu_mode cpu_mode, struct vie *vie);
#endif	/* _KERNEL */

#endif	/* _VMM_INSTRUCTION_EMUL_H_ */