1/*- 2 * Copyright (c) 2005 3 * Bill Paul <wpaul@windriver.com>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * The x86_64 callback routines were written and graciously submitted 33 * by Ville-Pertti Keinonen <will@exomi.com>. 
 *
 * $FreeBSD: src/sys/compat/ndis/winx64_wrap.S,v 1.3.2.1 2005/02/18 16:30:09 wpaul Exp $
 */

#include <machine/asmacros.h>

/*
 * Wrapper for handling up to 16 arguments. We can't really
 * know how many arguments the caller will pass us. I'm taking an
 * educated guess that we'll never get over 16. Handling too
 * few arguments is bad. Handling too many is inefficient, but
 * not fatal. If someone can think of a way to handle an arbitrary
 * number of arguments with more elegant code, feel free to let
 * me know.
 *
 * Standard amd64 calling conventions specify the following registers
 * to be used for passing the first 6 arguments:
 *
 *	%rdi, %rsi, %rdx, %rcx, %r8, %r9
 *
 * Further arguments are passed on the stack (the 7th argument is
 * located immediately after the return address).
 *
 * Windows x86_64 calling conventions only pass the first 4
 * arguments in registers:
 *
 *	%rcx, %rdx, %r8, %r9
 *
 * Even when arguments are passed in registers, the stack must have
 * space reserved for those arguments. Thus the 5th argument (the
 * first non-register argument) is placed 32 bytes after the return
 * address. Additionally, %rdi and %rsi must be preserved. (These
 * two registers are not scratch registers in the standard convention.)
 *
 * Note that in this template, we load a contrived 64 bit address into
 * %r11 to represent our jump address. This is to guarantee that the
 * assembler leaves enough room to patch in an absolute 64-bit address
 * later. The idea behind this code is that we want to avoid having to
 * manually create all the wrapper functions at compile time with
 * a bunch of macros. This is doable, but a) messy and b) requires
 * us to maintain two separate tables (one for the UNIX function
 * pointers and another with the wrappers). This means I'd have to
 * update two different tables each time I added a function.
 *
 * To avoid this, we create the wrappers at runtime instead. The
 * image patch tables now contain two pointers: one to the normal
 * routine, and a blank one for the wrapper. To construct a wrapper,
 * we allocate some memory and copy the template function into it,
 * then patch the function pointer for the routine we want to wrap
 * into the newly created wrapper. The subr_pe module can then
 * simply patch the wrapper routine into the jump table into the
 * windows image. As a bonus, the wrapper pointer not only serves
 * as the wrapper entry point address, it's also a data pointer
 * that we can pass to free() later when we unload the module.
 */

	.globl x86_64_wrap_call
	.globl x86_64_wrap_end

/*
 * Template trampoline: Windows (Microsoft x64) caller -> SysV callee.
 * This code is copied elsewhere at runtime and the 64-bit immediate
 * below is patched with the real target address, so the instruction
 * sequence (and its byte layout) must not change.
 *
 * Frame layout after subq: 0..79 = copy of stack args 7..16,
 * 80 = saved %rdi, 88 = saved %rsi (both callee-saved under the
 * Windows ABI but scratch under SysV, so the callee may clobber them).
 */
ENTRY(x86_64_wrap)
	subq	$96,%rsp	# allocate frame (arg copy area + reg saves)
	mov	%rsi,96-8(%rsp)	# save %rsi (callee-saved in Windows ABI)
	mov	%rdi,96-16(%rsp)# save %rdi (callee-saved in Windows ABI)
	mov	%rcx,%r10	# stash Windows arg0; movsq needs %rcx
	mov	%rsp,%rsi
	add	$96+56,%rsi	# source: caller's 7th arg (old stack+56)
	mov	%rsp,%rdi	# destination: bottom of new frame
	mov	$10,%rcx	# count: args 7..16 = 10 quadwords
	rep
	movsq			# copy stack-passed args to new location
	mov	%r10,%rdi	# set up arg0 (%rcx -> %rdi)
	mov	%rdx,%rsi	# set up arg1 (%rdx -> %rsi)
	mov	%r8,%rdx	# set up arg2 (%r8 -> %rdx)
	mov	%r9,%rcx	# set up arg3 (%r9 -> %rcx)
	mov	96+40(%rsp),%r8	# set up arg4 (old stack+40 -> %r8)
	mov	96+48(%rsp),%r9	# set up arg5 (old stack+48 -> %r9)
	xor	%rax,%rax	# clear return value (also %al = 0 FP args)
x86_64_wrap_call:
	mov	$0xFF00FF00FF00FF00,%r11	# placeholder; patched at runtime
	callq	*%r11		# call the SysV routine
	mov	96-16(%rsp),%rdi# restore %rdi
	mov	96-8(%rsp),%rsi	# restore %rsi
	addq	$96,%rsp	# release frame
	ret
x86_64_wrap_end:

/*
 * Functions for invoking x86_64 callbacks. In each case, the first
 * argument is a pointer to the function.
123 */ 124 125ENTRY(x86_64_call1) 126 subq $8,%rsp 127 mov %rsi,%rcx 128 call *%rdi 129 addq $8,%rsp 130 ret 131 132ENTRY(x86_64_call2) 133 subq $24,%rsp 134 mov %rsi,%rcx 135 /* %rdx is already correct */ 136 call *%rdi 137 addq $24,%rsp 138 ret 139 140ENTRY(x86_64_call3) 141 subq $24,%rsp 142 mov %rcx,%r8 143 mov %rsi,%rcx 144 call *%rdi 145 addq $24,%rsp 146 ret 147 148ENTRY(x86_64_call4) 149 subq $40,%rsp 150 mov %r8,%r9 151 mov %rcx,%r8 152 mov %rsi,%rcx 153 call *%rdi 154 addq $40,%rsp 155 ret 156 157ENTRY(x86_64_call5) 158 subq $40,%rsp 159 mov %r9,32(%rsp) 160 mov %r8,%r9 161 mov %rcx,%r8 162 mov %rsi,%rcx 163 call *%rdi 164 addq $40,%rsp 165 ret 166 167ENTRY(x86_64_call6) 168 subq $56,%rsp 169 mov 56+8(%rsp),%rax 170 mov %r9,32(%rsp) 171 mov %rax,40(%rsp) 172 mov %r8,%r9 173 mov %rcx,%r8 174 mov %rsi,%rcx 175 call *%rdi 176 addq $56,%rsp 177 ret 178