/* vmx_support.S, revision 221914 */
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>

#include "vmx_assym.s"

/*
 * Load the guest's general purpose registers from the 'vmxctx'.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx'.  %rdi itself is
 * restored last because it is the base register for all the loads.
 */
#define	VMX_GUEST_RESTORE						\
	/*								\
	 * Make sure that interrupts are disabled before restoring CR2.	\
	 * Otherwise there could be a page fault during the interrupt	\
	 * handler execution that would end up trashing CR2.		\
	 */								\
	cli;								\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi; /* restore rdi the last */

/*
 * Decode the status flags left behind by a failed vmlaunch/vmresume
 * into 'reg' and store the result in 'vmxctx->launch_error':
 *
 *	CF set	-> VM_FAIL_INVALID
 *	ZF set	-> VM_FAIL_VALID
 *	neither	-> VM_SUCCESS
 *
 * %rsp points to the 'vmxctx' when this macro is used.
 */
#define	VM_INSTRUCTION_ERROR(reg)					\
	jnc	1f;							\
	movl	$VM_FAIL_INVALID,reg;		/* CF is set */		\
	jmp	3f;							\
1:	jnz	2f;							\
	movl	$VM_FAIL_VALID,reg;		/* ZF is set */		\
	jmp	3f;							\
2:	movl	$VM_SUCCESS,reg;					\
3:	movl	reg,VMXCTX_LAUNCH_ERROR(%rsp)

	.text
/*
 * int vmx_setjmp(ctxp)
 * %rdi = ctxp
 *
 * Saves the host's callee-saved registers and return address in the
 * 'vmxctx' so that vmx_return() can unwind back to this call site.
 *
 * Return value is '0' when it returns directly from here.
 * Return value is '1' when it returns after a vm exit through vmx_longjmp.
 */
ENTRY(vmx_setjmp)
	movq	(%rsp),%rax			/* return address */
	movq	%r15,VMXCTX_HOST_R15(%rdi)
	movq	%r14,VMXCTX_HOST_R14(%rdi)
	movq	%r13,VMXCTX_HOST_R13(%rdi)
	movq	%r12,VMXCTX_HOST_R12(%rdi)
	movq	%rbp,VMXCTX_HOST_RBP(%rdi)
	movq	%rsp,VMXCTX_HOST_RSP(%rdi)
	movq	%rbx,VMXCTX_HOST_RBX(%rdi)
	movq	%rax,VMXCTX_HOST_RIP(%rdi)

	/*
	 * XXX save host debug registers
	 */
	movl	$VMX_RETURN_DIRECT,%eax
	ret
END(vmx_setjmp)

/*
 * void vmx_return(struct vmxctx *ctxp, int retval)
 * %rdi = ctxp
 * %rsi = retval
 * Return to vmm context through vmx_setjmp() with a value of 'retval'.
 */
ENTRY(vmx_return)
	/* Restore host context. */
	movq	VMXCTX_HOST_R15(%rdi),%r15
	movq	VMXCTX_HOST_R14(%rdi),%r14
	movq	VMXCTX_HOST_R13(%rdi),%r13
	movq	VMXCTX_HOST_R12(%rdi),%r12
	movq	VMXCTX_HOST_RBP(%rdi),%rbp
	movq	VMXCTX_HOST_RSP(%rdi),%rsp
	movq	VMXCTX_HOST_RBX(%rdi),%rbx
	movq	VMXCTX_HOST_RIP(%rdi),%rax
	movq	%rax,(%rsp)			/* return address */

	/*
	 * XXX restore host debug registers
	 */
	movl	%esi,%eax
	ret
END(vmx_return)

/*
 * void vmx_longjmp(void)
 * %rsp points to the struct vmxctx
 */
ENTRY(vmx_longjmp)
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)

	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi
	movq	$VMX_RETURN_LONGJMP,%rsi
	callq	vmx_return
END(vmx_longjmp)

/*
 * void vmx_resume(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is a 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of 2.
 */
ENTRY(vmx_resume)
	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmresume

	/*
	 * Capture the reason why vmresume failed.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMRESUME,%rsi
	callq	vmx_return
END(vmx_resume)

/*
 * void vmx_launch(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is a 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of 3.
 */
ENTRY(vmx_launch)
	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmlaunch

	/*
	 * Capture the reason why vmlaunch failed.
	 */
	VM_INSTRUCTION_ERROR(%eax)

	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMLAUNCH,%rsi
	callq	vmx_return
END(vmx_launch)