// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2014 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <asm.h>
#include <arch/asm_macros.h>
#include <arch/arch_thread.h>
#include <arch/arm64.h>
#include <arch/arm64/exceptions.h>
#include <zircon/zx-syscall-numbers.h>

.section .text.boot.vectab,"ax",@progbits
.align 12

#define DW_REG_lr   30
#define DW_REG_sp   31
// The "current mode exception link register", which for our purposes is elr_el1.
#define DW_REG_ELR_mode 33
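// (DWARF register numbering per the AArch64 DWARF ABI: 0-30 are x0-x30,
// 31 is sp, 32 is reserved, and 33 is ELR_mode.)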

#define lr x30
#define elr1 DW_REG_ELR_mode

// offset in the iframe where lr, sp, elr, spsr, and mdscr are saved
#define regsave_special_reg_offset (30 * 8)

// offset in the iframe where x20-x29 are saved
#define regsave_high_reg_offset (20 * 8)

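// For reference, a sketch of the long iframe layout implied by the offsets
// above and the regsave macros below (36 slots of 8 bytes, 288 bytes total,
// 16-byte aligned):
//
//   [sp + 0x000 .. 0x0e8]  x0 - x29  (x20-x29 only present in long frames)
//   [sp + 0x0f0]           lr (x30)
//   [sp + 0x0f8]           sp_el0
//   [sp + 0x100]           elr_el1
//   [sp + 0x108]           spsr_el1
//   [sp + 0x110]           mdscr_el1
//   [sp + 0x118]           unused padding slot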
.macro regsave_long
// There are 5 regs @ regsave_special_reg_offset, plus one unused slot = 6 to
// maintain 16-byte stack alignment.
sub_from_sp (6*8)
push_regs x28, x29
push_regs x26, x27
push_regs x24, x25
push_regs x22, x23
push_regs x20, x21
push_regs x18, x19
push_regs x16, x17
push_regs x14, x15
push_regs x12, x13
push_regs x10, x11
push_regs x8, x9
push_regs x6, x7
push_regs x4, x5
push_regs x2, x3
push_regs x0, x1
// Preserve x0-x7 for syscall arguments
mrs  x9, sp_el0
// x10 (containing elr_el1) is used in the syscall handler
mrs  x10, elr_el1
mrs  x11, spsr_el1
mrs  x12, mdscr_el1
stp  lr, x9, [sp, #regsave_special_reg_offset]
.cfi_rel_offset lr, (regsave_special_reg_offset)
.cfi_rel_offset sp, (regsave_special_reg_offset + 8)
stp  x10, x11, [sp, #regsave_special_reg_offset + 16]
.cfi_rel_offset elr1, (regsave_special_reg_offset + 16)
str x12, [sp, #regsave_special_reg_offset + 32]
.endm

.macro regsave_short
// Align the same as the long version above. The 10 extra slots correspond to
// x20 through x29, which are not saved here.
sub_from_sp ((6*8) + (10*8))
push_regs x18, x19
push_regs x16, x17
push_regs x14, x15
push_regs x12, x13
push_regs x10, x11
push_regs x8, x9
push_regs x6, x7
push_regs x4, x5
push_regs x2, x3
push_regs x0, x1
// Preserve x0-x7 to mirror regsave_long
mrs  x9, sp_el0
mrs  x10, elr_el1
mrs  x11, spsr_el1
stp  lr, x9, [sp, #regsave_special_reg_offset]
.cfi_rel_offset lr, (regsave_special_reg_offset)
.cfi_rel_offset sp, (regsave_special_reg_offset + 8)
stp  x10, x11, [sp, #regsave_special_reg_offset + 16]
.cfi_rel_offset elr1, (regsave_special_reg_offset + 16)
.endm

// Convert a short iframe to a long one by saving the remaining registers
// (x20-x29) and mdscr_el1.
.macro regsave_short_to_long
stp  x20, x21, [sp, #regsave_high_reg_offset]
stp  x22, x23, [sp, #regsave_high_reg_offset + 0x10]
stp  x24, x25, [sp, #regsave_high_reg_offset + 0x20]
stp  x26, x27, [sp, #regsave_high_reg_offset + 0x30]
stp  x28, x29, [sp, #regsave_high_reg_offset + 0x40]
mrs  x12, mdscr_el1
str  x12, [sp, #regsave_special_reg_offset + 32]
.endm

// Once we pop the stack past the saved sp_el0 and elr_el1, the userspace
// values are inaccessible.
.macro mark_lr_sp_inaccessible
// TODO(dje): gdb tries to use some value for these even if "undefined";
// as a workaround set their values to zero, which will cause gdb to
// terminate the backtrace. Need to revisit; file a gdb bug if necessary.
cfi_register_is_zero DW_REG_sp
cfi_register_is_zero DW_REG_ELR_mode
.endm

.macro regrestore_long
// Preserve x0-x1 for syscall returns (eventually x0-x7)
ldp  lr, x9, [sp, #regsave_special_reg_offset]
.cfi_same_value lr
ldp  x10, x11, [sp, #regsave_special_reg_offset + 16]
ldr  x12, [sp, #regsave_special_reg_offset + 32]
msr  sp_el0, x9
msr  elr_el1, x10
msr  spsr_el1, x11
msr  mdscr_el1, x12
pop_regs x0, x1
pop_regs x2, x3
pop_regs x4, x5
pop_regs x6, x7
pop_regs x8, x9
pop_regs x10, x11
pop_regs x12, x13
pop_regs x14, x15
pop_regs x16, x17
pop_regs x18, x19
pop_regs x20, x21
pop_regs x22, x23
pop_regs x24, x25
pop_regs x26, x27
pop_regs x28, x29
add_to_sp (6*8)
mark_lr_sp_inaccessible
.endm

.macro regrestore_short
// Preserve x0-x7 to mirror regrestore_long
ldp  lr, x9, [sp, #regsave_special_reg_offset]
.cfi_same_value lr
ldp  x10, x11, [sp, #regsave_special_reg_offset + 16]
msr  sp_el0, x9
msr  elr_el1, x10
msr  spsr_el1, x11
pop_regs x0, x1
pop_regs x2, x3
pop_regs x4, x5
pop_regs x6, x7
pop_regs x8, x9
pop_regs x10, x11
pop_regs x12, x13
pop_regs x14, x15
pop_regs x16, x17
pop_regs x18, x19
add_to_sp ((6*8) + (10*8))
mark_lr_sp_inaccessible
.endm

.macro start_isr_cfi
    .cfi_startproc simple
    .cfi_signal_frame
    // The return address is in elr_el1, not lr.
    .cfi_return_column elr1
    .cfi_def_cfa sp, 0
.endm

.macro start_isr_func
    start_isr_cfi
    ALL_CFI_SAME_VALUE
    .cfi_undefined elr1
.endm

.macro start_helper_cfi
    .cfi_startproc simple
    .cfi_signal_frame
    .cfi_def_cfa sp, (regsave_special_reg_offset + 4 * 8)
.endm

// The CFA offset of integer register |regno| (regno = 0-29).
#define REG_CFA_OFFSET(regno) .cfi_offset x##regno, -((4 * 8) + ((30 - (regno)) * 8))
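// For example, REG_CFA_OFFSET(0) expands to
//   .cfi_offset x0, -((4 * 8) + (30 * 8))
// placing x0 at CFA - 272, i.e. at sp itself given the CFA of
// (regsave_special_reg_offset + 4 * 8) defined in start_helper_cfi.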

// Mark the locations of the registers relative to the CFA so that the
// locations don't change as the regs are popped.
.macro setup_short_helper_cfi
    REG_CFA_OFFSET(0)
    REG_CFA_OFFSET(1)
    REG_CFA_OFFSET(2)
    REG_CFA_OFFSET(3)
    REG_CFA_OFFSET(4)
    REG_CFA_OFFSET(5)
    REG_CFA_OFFSET(6)
    REG_CFA_OFFSET(7)
    REG_CFA_OFFSET(8)
    REG_CFA_OFFSET(9)
    REG_CFA_OFFSET(10)
    REG_CFA_OFFSET(11)
    REG_CFA_OFFSET(12)
    REG_CFA_OFFSET(13)
    REG_CFA_OFFSET(14)
    REG_CFA_OFFSET(15)
    REG_CFA_OFFSET(16)
    REG_CFA_OFFSET(17)
    REG_CFA_OFFSET(18)
    REG_CFA_OFFSET(19)
    .cfi_offset sp, -(3 * 8)
    .cfi_offset lr, -(4 * 8)
.endm

.macro setup_long_helper_cfi
    setup_short_helper_cfi
    REG_CFA_OFFSET(20)
    REG_CFA_OFFSET(21)
    REG_CFA_OFFSET(22)
    REG_CFA_OFFSET(23)
    REG_CFA_OFFSET(24)
    REG_CFA_OFFSET(25)
    REG_CFA_OFFSET(26)
    REG_CFA_OFFSET(27)
    REG_CFA_OFFSET(28)
    REG_CFA_OFFSET(29)
.endm

.macro start_short_helper
    start_helper_cfi
    setup_short_helper_cfi
.endm

.macro start_long_helper
    start_helper_cfi
    setup_long_helper_cfi
.endm

.macro invalid_exception, which
    start_isr_func
    regsave_long
    mov x1, #\which
    mov x0, sp
    bl  arm64_invalid_exception
    b   .
.endm

.macro irq_exception, exception_flags
    start_isr_func
    regsave_short
    msr daifclr, #1 /* reenable fiqs once elr and spsr have been saved */
    mov x0, sp
    mov x1, \exception_flags
    bl  arm64_irq
    cbnz x0, arm64_finish_user_irq_wrapper /* anything extra to do? */
    msr daifset, #1 /* disable fiqs to protect elr and spsr restore */
    b   arm64_exc_shared_restore_short
.endm

.macro sync_exception, exception_flags, from_lower_el_64=0
    start_isr_func
    regsave_long
    mrs x9, esr_el1
.if \from_lower_el_64
    // If this is a syscall, x0-x7 contain args and x16 contains syscall num.
    // x10 contains elr_el1.
    lsr x11, x9, #26              // shift esr right 26 bits to get ec
    cmp x11, #0x15                // check for 64-bit syscall
    beq arm64_syscall_dispatcher  // and jump to syscall handler
.endif
    // Prepare the default sync_exception args
    mov x0, sp
    mov x1, \exception_flags
    mov w2, w9
    bl  arm64_sync_exception
    b  arm64_exc_shared_restore_long
.endm

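// The vector table below follows the ARMv8-A layout: 16 entries of 0x80
// bytes each, in four groups of four (current EL with SP_EL0, current EL
// with SP_ELx, lower EL using AArch64, lower EL using AArch32), each group
// ordered sync, IRQ, FIQ, SError. The .org directives pin every handler to
// its architecturally required offset from arm64_el1_exception_base, the
// address installed in vbar_el1.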
FUNCTION_LABEL(arm64_el1_exception_base)

/* exceptions from current EL, using SP0 */
.org 0x000
LOCAL_FUNCTION_LABEL(arm64_el1_sync_exc_current_el_SP0)
    invalid_exception 0
END_FUNCTION(arm64_el1_sync_exc_current_el_SP0)

.org 0x080
LOCAL_FUNCTION_LABEL(arm64_el1_irq_current_el_SP0)
    invalid_exception 1
END_FUNCTION(arm64_el1_irq_current_el_SP0)

.org 0x100
LOCAL_FUNCTION_LABEL(arm64_el1_fiq_current_el_SP0)
    invalid_exception 2
END_FUNCTION(arm64_el1_fiq_current_el_SP0)

.org 0x180
LOCAL_FUNCTION_LABEL(arm64_el1_err_exc_current_el_SP0)
    invalid_exception 3
END_FUNCTION(arm64_el1_err_exc_current_el_SP0)

/* exceptions from current EL, using SPx */
.org 0x200
LOCAL_FUNCTION_LABEL(arm64_el1_sync_exc_current_el_SPx)
    sync_exception #0 /* same EL, arm64 */
END_FUNCTION(arm64_el1_sync_exc_current_el_SPx)

.org 0x280
LOCAL_FUNCTION_LABEL(arm64_el1_irq_current_el_SPx)
    irq_exception #0 /* same EL, arm64 */
END_FUNCTION(arm64_el1_irq_current_el_SPx)

.org 0x300
LOCAL_FUNCTION_LABEL(arm64_el1_fiq_current_el_SPx)
    start_isr_func
    regsave_short
    mov x0, sp
    bl  platform_fiq
    b  arm64_exc_shared_restore_short
END_FUNCTION(arm64_el1_fiq_current_el_SPx)

.org 0x380
LOCAL_FUNCTION_LABEL(arm64_el1_err_exc_current_el_SPx)
    invalid_exception 0x13
END_FUNCTION(arm64_el1_err_exc_current_el_SPx)

/* exceptions from lower EL, running arm64 */
.org 0x400
LOCAL_FUNCTION_LABEL(arm64_el1_sync_exc_lower_el_64)
    sync_exception #(ARM64_EXCEPTION_FLAG_LOWER_EL), 1
END_FUNCTION(arm64_el1_sync_exc_lower_el_64)

.org 0x480
LOCAL_FUNCTION_LABEL(arm64_el1_irq_lower_el_64)
    irq_exception #(ARM64_EXCEPTION_FLAG_LOWER_EL)
END_FUNCTION(arm64_el1_irq_lower_el_64)

.org 0x500
LOCAL_FUNCTION_LABEL(arm64_el1_fiq_lower_el_64)
    start_isr_func
    regsave_short
    mov x0, sp
    bl  platform_fiq
    b  arm64_exc_shared_restore_short
END_FUNCTION(arm64_el1_fiq_lower_el_64)

.org 0x580
LOCAL_FUNCTION_LABEL(arm64_el1_err_exc_lower_el_64)
    invalid_exception 0x23
END_FUNCTION(arm64_el1_err_exc_lower_el_64)

/* exceptions from lower EL, running arm32 */
.org 0x600
LOCAL_FUNCTION_LABEL(arm64_el1_sync_exc_lower_el_32)
    sync_exception #(ARM64_EXCEPTION_FLAG_LOWER_EL|ARM64_EXCEPTION_FLAG_ARM32)
END_FUNCTION(arm64_el1_sync_exc_lower_el_32)

.org 0x680
LOCAL_FUNCTION_LABEL(arm64_el1_irq_lower_el_32)
    irq_exception #(ARM64_EXCEPTION_FLAG_LOWER_EL|ARM64_EXCEPTION_FLAG_ARM32)
END_FUNCTION(arm64_el1_irq_lower_el_32)

.org 0x700
LOCAL_FUNCTION_LABEL(arm64_el1_fiq_lower_el_32)
    start_isr_func
    regsave_short
    mov x0, sp
    bl  platform_fiq
    b  arm64_exc_shared_restore_short
END_FUNCTION(arm64_el1_fiq_lower_el_32)

.org 0x780
LOCAL_FUNCTION_LABEL(arm64_el1_err_exc_lower_el_32)
    invalid_exception 0x33
END_FUNCTION(arm64_el1_err_exc_lower_el_32)

/* If an IRQ happened in userspace, and either the thread was signaled or
   needs to be rescheduled, then we end up here after arm64_irq returns.
   Suspending the thread requires constructing a long iframe in order to
   provide the values of all regs to any debugger that wishes to access
   them, but we can't do that until arm64_irq returns as we rely on the
   compiler to save/restore callee-saved regs. */
LOCAL_FUNCTION_LABEL(arm64_finish_user_irq_wrapper)
    start_short_helper
    /* if we're only here because of a need to reschedule then we don't
       need to construct a long iframe */
    cmp x0, #ARM64_IRQ_EXIT_RESCHEDULE
    bne 1f
    mov x1, #0 /* don't need an iframe, just pass NULL */
    bl  arm64_finish_user_irq
    msr daifset, #1 /* disable fiqs to protect elr and spsr restore */
    b   arm64_exc_shared_restore_short
1:
    /* convert the short to a long frame */
    regsave_short_to_long
    mov x1, sp
    bl  arm64_finish_user_irq
    msr daifset, #1 /* disable fiqs to protect elr and spsr restore */
    b   arm64_exc_shared_restore_long
END_FUNCTION(arm64_finish_user_irq_wrapper)

LOCAL_FUNCTION_LABEL(arm64_exc_shared_restore_long)
    start_long_helper
    regrestore_long
    eret
END_FUNCTION(arm64_exc_shared_restore_long)

LOCAL_FUNCTION_LABEL(arm64_exc_shared_restore_short)
    start_short_helper
    regrestore_short
    eret
END_FUNCTION(arm64_exc_shared_restore_short)

//
// Syscall args are in x0-x7 already.
// pc is in x10 and needs to go in the next available register,
// or the stack if the regs are full.
//
.macro pre_args, nargs
.if \nargs == 8
    push_regs x10, x11 // push a pair to maintain 16-byte stack alignment
.else
    mov x\nargs, x10
.endif
.endm

.macro post_args, nargs
.if \nargs == 8
    pop_regs x10, x11
.endif
    b .Lpost_syscall
.endm
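
// For example, "pre_args 3" passes the return pc to the wrapper as a fourth
// argument in x3, while "pre_args 8" pushes it (paired with x11 purely for
// stack alignment) so it lands at sp, where AAPCS64 would place a ninth
// integer argument.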

//
// Expected state prior to arm64_syscall_dispatcher branch...
//
// x0-x7 - contains syscall arguments
// x9    - contains esr_el1
// x10   - contains elr_el1
// x16   - contains syscall_num
// sp    - points to base of frame (frame->r[0])
//
// Expected state prior to unknown_syscall and wrapper_syscall...
//
// x0-x7  - contains syscall arguments
// x10    - contains userspace pc
//
LOCAL_FUNCTION_LABEL(arm64_syscall_dispatcher)
    start_isr_func
    // Restore the per-CPU pointer
    mrs  x11, tpidr_el1
    ldr  x18, [x11, #CURRENT_PERCPU_PTR_OFFSET]
    // Verify the syscall number and call the unknown handler if bad.
    cmp  x16, #ZX_SYS_COUNT
    bhs  .Lunknown_syscall
    // Jump to the right syscall wrapper. call_wrapper_table is 4096-byte
    // aligned, so its page offset is zero and adrp alone yields its address.
    adrp x12, call_wrapper_table
    ldr  x12, [x12, x16, LSL#3]
    br   x12
.Lunknown_syscall:
    mov  x0, x16
    pre_args 1
    bl   unknown_syscall
    post_args 1
.Lpost_syscall:
    // Upon return from the syscall, x0 = status and x1 = thread signaled.
    // Move the status to frame->r[0] for return to userspace.
    str  x0, [sp]
    // Check for pending signals. If none, just return.
    cbz  x1, arm64_exc_shared_restore_long
    mov  x0, sp
    bl   arm64_thread_process_pending_signals
    b    arm64_exc_shared_restore_long
END_FUNCTION(arm64_syscall_dispatcher)

// Adds a label for making the syscall and adds it to the jump table.
.macro syscall_dispatch nargs, syscall
    .pushsection .text.syscall-dispatch,"ax",%progbits
    LOCAL_FUNCTION(.Lcall_\syscall\())
        pre_args \nargs
        bl wrapper_\syscall
        post_args \nargs
    END_FUNCTION(.Lcall_\syscall\())
    .popsection
    .pushsection .rodata.syscall-table,"a",%progbits
        .quad .Lcall_\syscall
    .popsection
.endm

// Adds the label for the jump table.
.macro start_syscall_dispatch
    .pushsection .rodata.syscall-table,"a",%progbits
    // align on a 4096-byte boundary to save an instruction on table lookup
    .balign 4096
    call_wrapper_table:
    .popsection
.endm
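
// As an illustration, a hypothetical "syscall_dispatch 2, foo" would emit a
// .Lcall_foo stub (pre_args 2; bl wrapper_foo; post_args 2) in
// .text.syscall-dispatch and append its address to call_wrapper_table via
// .rodata.syscall-table; the include below supplies the real
// syscall_dispatch invocations.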

#include <zircon/syscall-kernel-branches.S>
