/*	$NetBSD$	*/

/*
 * Function calling ABI conversion from Linux to EFI for x86_64
 *
 * Copyright (C) 2007 Intel Corp
 *	Bibo Mao <bibo.mao@intel.com>
 *	Huang Ying <ying.huang@intel.com>
 * Copyright (C) 2012 Felipe Contreras <felipe.contreras@gmail.com>
 */

#if !defined(HAVE_USE_MS_ABI)
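/*
 * These thunks are only assembled when the compiler cannot emit the
 * Microsoft calling convention itself; HAVE_USE_MS_ABI is expected to be
 * defined by the build system when __attribute__((ms_abi)) is available,
 * in which case this entire file compiles to nothing.
 */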
/*
 * EFI calling conventions are documented at:
 *   http://msdn.microsoft.com/en-us/library/ms235286%28v=vs.80%29.aspx
 * ELF calling conventions are documented at:
 *   http://www.x86-64.org/documentation/abi.pdf
 *
 * Basically here are the conversion rules:
 * a) our function pointer is in %rdi
 * b) rsi through r8 (elf) aka rcx through r9 (ms) require 32 bytes of
 *    shadow space on the MS side even though the callee may never use it.
 * c) 8(%rsp) is always aligned to 16 in ELF, so %rsp is shifted 8 bytes extra
 * d) arguments are as follows: (elf -> ms)
 *   1) rdi -> rcx (32 saved)
 *   2) rsi -> rdx (32 saved)
 *   3) rdx -> r8 (32 saved)
 *   4) rcx -> r9 (32 saved)
 *   5) r8 -> 32(%rsp) (32 saved)
 *   6) r9 -> 40(%rsp) (48 saved)
 *   7) 8(%rsp) -> 48(%rsp) (48 saved)
 *   8) 16(%rsp) -> 56(%rsp) (64 saved)
 *   9) 24(%rsp) -> 64(%rsp) (64 saved)
 *  10) 32(%rsp) -> 72(%rsp) (80 saved)
 * e) because the first argument we receive in a thunk is actually the
 *    function to be called, arguments are offset as such:
 *   0) rdi -> call target
 *   1) rsi -> rcx (32 saved)
 *   2) rdx -> rdx (32 saved)
 *   3) rcx -> r8 (32 saved)
 *   4) r8 -> r9 (32 saved)
 *   5) r9 -> 32(%rsp) (32 saved)
 *   6) 8(%rsp) -> 40(%rsp) (48 saved)
 *   7) 16(%rsp) -> 48(%rsp) (48 saved)
 *   8) 24(%rsp) -> 56(%rsp) (64 saved)
 *   9) 32(%rsp) -> 64(%rsp) (64 saved)
 *  10) 40(%rsp) -> 72(%rsp) (80 saved)
 * f) arguments need to be moved in opposite order to avoid clobbering
 */
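
/*
 * A sketch of how the ELF side is expected to use these thunks: declare a
 * C prototype whose first parameter is the EFI function pointer and whose
 * remaining parameters are passed through unchanged.  The prototype and
 * the systab/ConOut names below are illustrative only, not defined here:
 *
 *	extern uint64_t efi_call2(void *func, uint64_t arg1, uint64_t arg2);
 *
 *	efi_call2(systab->ConOut->OutputString,
 *	    (uint64_t)systab->ConOut, (uint64_t)L"hello\r\n");
 */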

#define ENTRY(name)	\
	.globl name;	\
	name:

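/*
 * efi_call0 through efi_call5: every argument fits in a register on the
 * ELF side.  The 40-byte frame supplies the 32-byte MS shadow area plus
 * 8 bytes of padding so that %rsp is 16-byte aligned at the call;
 * efi_call5 additionally stores its fifth argument at 32(%rsp).
 */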
ENTRY(efi_call0)
	subq $40, %rsp
	call *%rdi
	addq $40, %rsp
	ret

ENTRY(efi_call1)
	subq $40, %rsp
	mov  %rsi, %rcx
	call *%rdi
	addq $40, %rsp
	ret

ENTRY(efi_call2)
	subq $40, %rsp
	/* mov %rdx, %rdx */
	mov  %rsi, %rcx
	call *%rdi
	addq $40, %rsp
	ret

ENTRY(efi_call3)
	subq $40, %rsp
	mov  %rcx, %r8
	/* mov %rdx, %rdx */
	mov  %rsi, %rcx
	call *%rdi
	addq $40, %rsp
	ret

ENTRY(efi_call4)
	subq $40, %rsp
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $40, %rsp
	ret

ENTRY(efi_call5)
	subq $40, %rsp
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $40, %rsp
	ret

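/*
 * efi_call6 through efi_call10: arguments beyond the fifth arrive on the
 * ELF caller's stack, above our frame and the return address, so they are
 * read at framesize+8(%rsp), framesize+16(%rsp), ... and copied down to
 * the MS stack slots starting at 40(%rsp).  The frame grows in 16-byte
 * steps to make room for the extra slots while keeping %rsp aligned.
 */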
ENTRY(efi_call6)
	subq $56, %rsp
	mov 56+8(%rsp), %rax
	mov %rax, 40(%rsp)
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $56, %rsp
	ret

ENTRY(efi_call7)
	subq $56, %rsp
	mov 56+16(%rsp), %rax
	mov %rax, 48(%rsp)
	mov 56+8(%rsp), %rax
	mov %rax, 40(%rsp)
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $56, %rsp
	ret

ENTRY(efi_call8)
	subq $72, %rsp
	mov 72+24(%rsp), %rax
	mov %rax, 56(%rsp)
	mov 72+16(%rsp), %rax
	mov %rax, 48(%rsp)
	mov 72+8(%rsp), %rax
	mov %rax, 40(%rsp)
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $72, %rsp
	ret

ENTRY(efi_call9)
	subq $72, %rsp
	mov 72+32(%rsp), %rax
	mov %rax, 64(%rsp)
	mov 72+24(%rsp), %rax
	mov %rax, 56(%rsp)
	mov 72+16(%rsp), %rax
	mov %rax, 48(%rsp)
	mov 72+8(%rsp), %rax
	mov %rax, 40(%rsp)
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $72, %rsp
	ret

ENTRY(efi_call10)
	subq $88, %rsp
	mov 88+40(%rsp), %rax
	mov %rax, 72(%rsp)
	mov 88+32(%rsp), %rax
	mov %rax, 64(%rsp)
	mov 88+24(%rsp), %rax
	mov %rax, 56(%rsp)
	mov 88+16(%rsp), %rax
	mov %rax, 48(%rsp)
	mov 88+8(%rsp), %rax
	mov %rax, 40(%rsp)
	mov %r9, 32(%rsp)
	mov %r8, %r9
	mov %rcx, %r8
	/* mov %rdx, %rdx */
	mov %rsi, %rcx
	call *%rdi
	addq $88, %rsp
	ret

#endif