1// Copyright 2016 The Fuchsia Authors
2// Copyright (c) 2016 Travis Geiselbrecht
3//
4// Use of this source code is governed by a MIT-style
5// license that can be found in the LICENSE file or at
6// https://opensource.org/licenses/MIT
7
8#include <asm.h>
9#include <arch/x86/mp.h>
10#include <zircon/zx-syscall-numbers.h>
11
// DWARF register numbers (System V AMD64 psABI register mapping) for %rsp
// and the return-address/%rip column, used by cfi_outermost_frame below to
// describe the outermost kernel frame to unwinders.
#define DW_REG_rsp        0x7
#define DW_REG_rip        0x10
14
//
// Macros for preparing ABI conformant calls for syscall wrappers.
//
// The zircon syscall register ABI avoids %rcx and %r11 because the SYSCALL
// instruction clobbers them (rcx = user RIP, r11 = user RFLAGS), so args 4,
// 7 and 8 arrive in r10, r12 and r13. These macros shuffle that layout into
// the SysV C calling convention expected by the wrapper_* functions, passing
// the user RIP as an extra trailing argument.
//
// syscall_8(arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, arg_8, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// arg_3 from rdx to rdx
// arg_4 from r10 to rcx
// arg_5 from r8  to r8
// arg_6 from r9  to r9
// arg_7 from r12 to (%rsp)
// arg_8 from r13 to 8(%rsp)
// rip   from rcx to 16(%rsp)
//
.macro pre_8_args
    pre_push 3              /* align rsp for 3 stack args (see pre_push) */
    push_value %rcx         /* user RIP -> 9th (stack) argument */
    push_value %r13         /* arg 8 */
    push_value %r12         /* arg 7 */

    /* move arg 4 into the proper register for calling convention */
    mov      %r10, %rcx
.endm

.macro post_8_args
    post_pop 3              /* drop the 3 stack args (+ alignment padding) */
    jmp     .Lcleanup_and_return
.endm
44
//
// syscall_7(arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, arg_7, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// arg_3 from rdx to rdx
// arg_4 from r10 to rcx
// arg_5 from r8  to r8
// arg_6 from r9  to r9
// arg_7 from r12 to (rsp)
// rip   from rcx to 8(rsp)
//
.macro pre_7_args
    pre_push 2              /* align rsp for 2 stack args */
    push_value %rcx         /* user RIP -> 8th (stack) argument */
    push_value %r12         /* arg 7 */
    mov      %r10, %rcx     /* arg 4 into the SysV 4th-arg register */
.endm

.macro post_7_args
    post_pop 2              /* drop the 2 stack args */
    jmp     .Lcleanup_and_return
.endm
68
//
// syscall_6(arg_1, arg_2, arg_3, arg_4, arg_5, arg_6, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// arg_3 from rdx to rdx
// arg_4 from r10 to rcx
// arg_5 from r8  to r8
// arg_6 from r9  to r9
// rip   from rcx to (rsp)
//
.macro pre_6_args
    pre_push 1              /* align rsp for 1 stack arg */
    push_value %rcx         /* user RIP is the only stack argument */
    mov      %r10, %rcx     /* arg 4 into the SysV 4th-arg register */
.endm

.macro post_6_args
    post_pop 1              /* drop the 1 stack arg */
    jmp     .Lcleanup_and_return
.endm
90
//
// syscall_5(arg_1, arg_2, arg_3, arg_4, arg_5, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// arg_3 from rdx to rdx
// arg_4 from r10 to rcx
// arg_5 from r8  to r8
// rip   from rcx to r9
//
.macro pre_5_args
    pre_push 0              /* no stack args; still realigns rsp */
    mov     %rcx, %r9       /* user RIP fits in the 6th-arg register */
    mov     %r10, %rcx      /* arg 4 into the SysV 4th-arg register */
.endm

.macro post_5_args
    post_pop 0
    jmp     .Lcleanup_and_return
.endm
111
//
// syscall_4(arg_1, arg_2, arg_3, arg_4, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// arg_3 from rdx to rdx
// arg_4 from r10 to rcx
// rip   from rcx to r8
//
.macro pre_4_args
    pre_push 0              /* no stack args; still realigns rsp */
    mov     %rcx, %r8       /* user RIP fits in the 5th-arg register */
    mov     %r10, %rcx      /* arg 4 into the SysV 4th-arg register */
.endm

.macro post_4_args
    post_pop 0
    jmp     .Lcleanup_and_return
.endm
131
//
// syscall_3(arg_1, arg_2, arg_3, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// arg_3 from rdx to rdx
// rip   from rcx to rcx
//
// With only 3 zircon args, the user RIP already sits in %rcx, which is
// exactly the SysV 4th-arg register - no register moves are needed.
//
.macro pre_3_args
    pre_push 0              /* just the alignment adjustment */
.endm

.macro post_3_args
    post_pop 0
    jmp     .Lcleanup_and_return
.endm
148
//
// syscall_2(arg_1, arg_2, rip)
//
// arg_1 from rdi to rdi
// arg_2 from rsi to rsi
// rip   from rcx to rdx
//
.macro pre_2_args
    pre_push 0              /* no stack args; still realigns rsp */
    mov     %rcx, %rdx      /* user RIP into the 3rd-arg register */
.endm

.macro post_2_args
    post_pop 0
    jmp     .Lcleanup_and_return
.endm
165
//
// syscall_1(arg_1, rip)
//
// arg_1 from rdi to rdi
// rip   from rcx to rsi
//
.macro pre_1_args
    pre_push 0              /* no stack args; still realigns rsp */
    mov    %rcx, %rsi       /* user RIP into the 2nd-arg register */
.endm

.macro post_1_args
    post_pop 0
    jmp     .Lcleanup_and_return
.endm
181
182
//
// syscall_0(rip)
//
// rip   from rcx to rdi
//
.macro pre_0_args
    pre_push 0              /* no stack args; still realigns rsp */
    mov    %rcx, %rdi       /* user RIP is the only argument */
.endm

.macro post_0_args
    post_pop 0
    jmp     .Lcleanup_and_return
.endm
197
// x86_syscall (below) leaves the stack misaligned by 8, so the macros
// need to account for that.
//
// pre_push n: push one dummy slot when \n is even so that after the \n
// argument pushes that follow, %rsp is 16-byte aligned at the `call`
// (as the SysV AMD64 ABI requires). Given the entry misalignment of 8,
// an odd total number of 8-byte pushes restores alignment.
.macro pre_push n
.if \n % 2 == 0
    push_value $0
.endif
.endm

// post_pop n: undo pre_push \n plus the \n argument pushes - i.e. pop
// \n + 1 slots when \n is even (dummy included), \n slots when odd.
.macro post_pop n
.if \n % 2 == 0
    add_to_sp ((\n + 1) * 8)
.else
    add_to_sp (\n * 8)
.endif
.endm
213
// Mark this frame as the outermost one for unwinders: declare the previous
// frame's %rsp and %rip (DWARF regs 0x7 and 0x10, defined above) as zero so
// backtraces stop here instead of walking into user space.
.macro cfi_outermost_frame
    // TODO(dje): IWBN to use .cfi_undefined here, but gdb didn't properly
    // handle initial attempts. Need to try again (or file gdb bug).
    cfi_register_is_zero DW_REG_rsp
    cfi_register_is_zero DW_REG_rip
.endm
220
// Adds a label for making the syscall and adds it to the jump table.
//
// Emits a per-syscall trampoline .Lcall_<syscall> into .text.syscall-dispatch
// that marshals registers with pre_\nargs\()_args, calls the C wrapper, and
// unwinds with post_\nargs\()_args (which jumps to .Lcleanup_and_return).
// Its address is appended to the .rodata jump table that x86_syscall
// indexes by syscall number, so invocation order of this macro must match
// the syscall numbering.
.macro syscall_dispatch nargs, syscall
    .pushsection .text.syscall-dispatch,"ax",%progbits
    LOCAL_FUNCTION(.Lcall_\syscall\())
        // See x86_syscall for why this is here.
        cfi_outermost_frame
        pre_\nargs\()_args
        call wrapper_\syscall
        post_\nargs\()_args
    END_FUNCTION(.Lcall_\syscall\())
    .popsection
    .pushsection .rodata.syscall-table,"a",%progbits
        .quad .Lcall_\syscall
    .popsection
.endm
236
// Adds the label for the jump table.
//
// Must be expanded once, before any syscall_dispatch expansion, so that
// .Lcall_wrapper_table names the start of the 8-byte-aligned table of
// trampoline addresses emitted into .rodata.syscall-table.
.macro start_syscall_dispatch
    .pushsection .rodata.syscall-table,"a",%progbits
    .balign 8
    .Lcall_wrapper_table:
    .popsection
.endm
244
.text

    /* kernel side of the SYSCALL instruction
     * state on entry:
     * RCX holds user RIP
     * R11 holds user RFLAGS
     * RSP still holds user stack
     * CS loaded with kernel CS from IA32_STAR
     * SS loaded with kernel CS + 8 from IA32_STAR

     * args passed:
     *  rax - syscall # and return
     *  rbx - saved
     *  rcx - modified as part of syscall instruction
     *  rdx - arg 3
     *  rdi - arg 1
     *  rsi - arg 2
     *  rbp - saved
     *  rsp - saved
     *  r8  - arg 5
     *  r9  - arg 6
     *  r10 - arg 4
     *  r11 - modified as part of syscall instruction
     *  r12 - arg 7
     *  r13 - arg 8
     *  r14 - saved
     *  r15 - saved
     */
FUNCTION_LABEL(x86_syscall)
    .cfi_startproc simple
    // CFI tracking here doesn't (currently) try to support backtracing from
    // kernel space to user space. This is left for later. For now just say
    // %rsp and %rip of the previous frame are zero, mark all the other
    // registers as undefined, and have all register push/pop just specify
    // stack adjustments and not how to find the register's value.
    cfi_outermost_frame
    // The default for caller-saved regs is "undefined", but for completeness
    // sake mark them all as undefined.
    ALL_CFI_UNDEFINED

    /* swap to the kernel GS register so %gs: refers to per-CPU data */
    swapgs

    /* save the user stack pointer */
    mov     %rsp, %gs:PERCPU_SAVED_USER_SP_OFFSET

    /* load the kernel stack pointer */
    mov     %gs:PERCPU_KERNEL_SP_OFFSET, %rsp
    .cfi_def_cfa %rsp, 0

    /* save away the user stack pointer */
    push_value %gs:PERCPU_SAVED_USER_SP_OFFSET

    push_value %r11 /* user RFLAGS */
    push_value %rcx /* user RIP */

    // Any changes to the stack here need to be reflected in
    // pre_push and post_pop macros above to maintain alignment.
    // Verify the syscall is in range and jump to it.
    // (unsigned compare: a negative rax is also rejected by jae)
    cmp     $ZX_SYS_COUNT, %rax
    jae     .Lunknown_syscall
    // r11 is free as scratch here: SYSCALL already clobbered it, and the
    // user value was saved above. Dispatch through the table built by
    // syscall_dispatch, indexed by syscall number.
    leaq    .Lcall_wrapper_table(%rip), %r11
    jmp     *(%r11, %rax, 8)
.Lunknown_syscall:
    pre_0_args
    call    unknown_syscall
    post_0_args

// Common epilogue: every trampoline's post_*_args jumps back here.
.Lcleanup_and_return:

    /* at this point:
       rax = syscall result
       rdx = non-zero if thread was signaled */

    /* restore the registers from which SYSRET restores user state */
    pop_value %rcx /* user RIP */
    pop_value %r11 /* user RFLAGS */

    /* zero out trashed arg registers so kernel values don't leak to user */
    xorl    %edi, %edi
    xorl    %esi, %esi
    /* Don't zero %rdx yet -- it contains the "is_signaled" indicator */
    xorl    %r10d, %r10d
    xorl    %r8d, %r8d
    xorl    %r9d, %r9d

    cmp     $0, %rdx
    jnz     .Lthread_signaled

    /*xor     %rdx, %rdx - already zero */

.Lreturn_from_syscall:

    /* make sure interrupts are disabled (they already are in the fall-through
       path, but if we took the .Lthread_signaled path they aren't) */
    cli

    /* restore the user stack */
    pop_value %rsp

    /* put the user gs back */
    swapgs

    /* This will fault if the return address is non-canonical.  See
     * docs/sysret_problem.md for how we avoid that. */
    sysretq

.Lthread_signaled:
    /* re-enable interrupts to maintain kernel preemptiveness */
    sti

    /* fill in x86_syscall_general_regs_t
       Because we don't save the regs unless we have to a lot of the original
       values are gone. The user just has to deal with it. One important thing
       to do here is not leak kernel values to userspace.
       NOTE(review): the push order below must stay in sync with the field
       layout of x86_syscall_general_regs_t (declared in a C header not
       visible here) -- the struct is read/written via the %rsp pointer
       passed to x86_syscall_process_pending_signals. */
    movq    (%rsp), %rdi /* user rsp (still at top of stack from entry) */
    push_value %r11 /* rflags (r11 holds user RFLAGS here) */
    push_value %rcx /* rip (rcx holds user RIP here) */
    push_value %r15
    push_value %r14
    push_value %r13
    push_value %r12
    push_value %r11
    push_value %r10
    push_value %r9
    push_value %r8
    push_value %rdi /* rsp */
    push_value %rbp
    push_value $0   /* rdi slot: rdi was reused above for user rsp */
    push_value %rsi
    push_value $0 /* instead of signaled flag */
    push_value %rcx
    push_value %rbx
    push_value %rax

    /* pass a pointer to the regs struct just built on the stack */
    movq    %rsp, %rdi
    call    x86_syscall_process_pending_signals

    /* reload (possibly modified) register values in reverse push order */
    pop_value %rax
    pop_value %rbx
    pop_value %rcx
    pop_value %rdx
    pop_value %rsi
    pop_value %rdi
    pop_value %rbp
    pop_value %r8 /* discard any changed %rsp value - TODO(dje): check ok */
    pop_value %r8
    pop_value %r9
    pop_value %r10
    pop_value %r11
    pop_value %r12
    pop_value %r13
    pop_value %r14
    pop_value %r15
    pop_value %rcx /* user RIP for sysretq */
    pop_value %r11 /* user RFLAGS for sysretq */
    jmp     .Lreturn_from_syscall

END_FUNCTION(x86_syscall)
404
405#include <zircon/syscall-kernel-branches.S>
406