1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
8 * See "LICENSE_GPLv2.txt" for details.
9 *
10 * @TAG(DATA61_GPL)
11 */
12
13#include <config.h>
14#include <machine/assembler.h>
15#include <arch/api/syscall.h>
16#include <arch/machine/hardware.h>
17#include <arch/machine/registerset.h>
18
/* Event codes used to tell the VM fault handler which kind of
 * lower-EL memory abort occurred. */
#define VM_EVENT_DATA_ABORT 0
#define VM_EVENT_PREFETCH_ABORT 1

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

/* With hypervisor support the kernel executes in EL2, so the exception
 * return address, syndrome, saved program state and per-core kernel
 * pointer live in the EL2 system registers. */
#define ELR     elr_el2
#define ESR     esr_el2
#define SPSR    spsr_el2
#define TPIDR   tpidr_el2

#else

/* Without hypervisor support the kernel executes in EL1 and uses the
 * EL1 equivalents of the same registers. */
#define ELR     elr_el1
#define ESR     esr_el1
#define SPSR    spsr_el1
#define TPIDR   tpidr_el1

#endif
37
38
/*
 * lsp_i: load the kernel stack pointer into sp.
 *
 * TPIDR (tpidr_el1/el2, depending on configuration) holds a per-core
 * kernel pointer.  On multicore configurations its low 12 bits are
 * masked off before use as the stack pointer.
 * NOTE(review): the #0xfff mask implies the per-core stack base is
 * 4KiB-aligned with extra data packed in the low bits on SMP —
 * confirm against where TPIDR is written.
 *
 * \_tmp: scratch register; clobbered.
 */
.macro lsp_i _tmp
    mrs     \_tmp, TPIDR
#if CONFIG_MAX_NUM_NODES > 1
    /* Clear the low 12 bits to recover the stack pointer value. */
    bic     \_tmp, \_tmp, #0xfff
#endif
    mov     sp, \_tmp
.endm
46
/*
 * ventry: emit one exception-vector slot.
 *
 * AArch64 vector slots are 0x80 bytes apart; the .align 7 (2^7 = 128)
 * pads to the start of the next slot, which then contains a single
 * branch to the real handler.
 */
.macro ventry label
.align 7
    b       \label
.endm
51
52.section .vectors
53
/*
 * AArch64 exception vector table: four groups of four slots
 * (Synchronous, IRQ, FIQ, SError) for, in order: current EL using
 * SP_EL0, current EL using SP_ELx, lower EL in AArch64 state, and
 * lower EL in AArch32 state.  Only the events the kernel expects get
 * real handlers; every other slot branches to invalid_vector_entry,
 * which halts.
 * NOTE(review): VBAR_ELx requires the table base to be 2KiB-aligned;
 * this is assumed to come from the placement of the .vectors section —
 * confirm in the linker script.
 */
BEGIN_FUNC(arm_vector_table)
    ventry  invalid_vector_entry           // Synchronous EL1t/EL2t
    ventry  invalid_vector_entry           // IRQ EL1t/EL2t
    ventry  invalid_vector_entry           // FIQ EL1t/EL2t
    ventry  invalid_vector_entry           // SError EL1t/EL2t

    ventry  cur_el_sync                    // Current EL Synchronous (EL1/2)
    ventry  cur_el_irq                     // IRQ
    ventry  invalid_vector_entry           // FIQ
    ventry  invalid_vector_entry           // SError

    ventry  lower_el_sync                  // Synchronous 64-bit EL0/EL1
    ventry  lower_el_irq                   // IRQ 64-bit EL0/EL1
    ventry  invalid_vector_entry           // FIQ 64-bit EL0/EL1
    ventry  invalid_vector_entry           // SError 64-bit EL0/EL1

    ventry  invalid_vector_entry           // Synchronous 32-bit EL0/EL1
    ventry  invalid_vector_entry           // IRQ 32-bit EL0/EL1
    ventry  invalid_vector_entry           // FIQ 32-bit EL0/EL1
    ventry  invalid_vector_entry           // SError 32-bit EL0/EL1
END_FUNC(arm_vector_table)
75
76.section .vectors.text
77
/*
 * kernel_enter: save the trapped thread's register context.
 *
 * On entry sp (the SP_ELx banked stack pointer) points at the current
 * thread's register frame.  All general-purpose registers x0-x30 are
 * stored, followed by the user stack pointer (sp_el0), the exception
 * return address (ELR) and the saved program state (SPSR) at the
 * PT_LR / PT_ELR_EL1 frame offsets.  x21-x23 are reused as scratch
 * only after their original values have been stored above.
 */
.macro kernel_enter
    /* Storing thread's stack frame */
    stp     x0,  x1,  [sp, #16 * 0]
    stp     x2,  x3,  [sp, #16 * 1]
    stp     x4,  x5,  [sp, #16 * 2]
    stp     x6,  x7,  [sp, #16 * 3]
    stp     x8,  x9,  [sp, #16 * 4]
    stp     x10, x11, [sp, #16 * 5]
    stp     x12, x13, [sp, #16 * 6]
    stp     x14, x15, [sp, #16 * 7]
    stp     x16, x17, [sp, #16 * 8]
    stp     x18, x19, [sp, #16 * 9]
    stp     x20, x21, [sp, #16 * 10]
    stp     x22, x23, [sp, #16 * 11]
    stp     x24, x25, [sp, #16 * 12]
    stp     x26, x27, [sp, #16 * 13]
    stp     x28, x29, [sp, #16 * 14]

    /* Store thread's SPSR, LR, and SP */
    mrs     x21, sp_el0
    mrs     x22, ELR
    mrs     x23, SPSR
    stp     x30, x21, [sp, #PT_LR]
    stp     x22, x23, [sp, #PT_ELR_EL1]
.endm
103
/*
 * Landing pad for exceptions the kernel never expects to take (FIQ,
 * SError, AArch32 and SP_EL0-stack vectors).  The thread context is
 * deliberately not saved: switch to the kernel stack and halt.
 */
BEGIN_FUNC(invalid_vector_entry)
    lsp_i   x19
    b       halt
END_FUNC(invalid_vector_entry)
108
/*
 * Synchronous exception taken from the kernel itself.  Any such
 * exception is fatal; the exception class is decoded from ESR only so
 * a debug build can report whether it was a data or instruction abort
 * before halting.
 */
BEGIN_FUNC(cur_el_sync)
    lsp_i   x19
    /* Read esr and branch to respective labels */
    mrs     x25, ESR
    lsr     x24, x25, #ESR_EC_SHIFT
    cmp     x24, #ESR_EC_CEL_DABT
    b.eq    cur_el_da
    cmp     x24, #ESR_EC_CEL_IABT
    b.eq    cur_el_ia
    b       cur_el_inv

cur_el_da:
#ifdef CONFIG_DEBUG_BUILD
    /* Pass the faulting kernel PC to the debug report routine. */
    mrs     x0, ELR
    bl      kernelDataAbort
#endif /* CONFIG_DEBUG_BUILD */
    b       halt

cur_el_ia:
#ifdef CONFIG_DEBUG_BUILD
    /* Pass the faulting kernel PC to the debug report routine. */
    mrs     x0, ELR
    bl      kernelPrefetchAbort
#endif /* CONFIG_DEBUG_BUILD */
    b       halt

cur_el_inv:
    /* Unexpected exception class from the kernel: treat as invalid. */
    b       invalid_vector_entry
END_FUNC(cur_el_sync)
137
138/*
139 * This is only called if ksCurThread is idle thread.
140 *
141 * No need to store the state of idle thread and simply call c_handle_interrupt to
142 * activate ksCurThread when returning from interrupt as long as idle thread is stateless.
143 */
144BEGIN_FUNC(cur_el_irq)
145    lsp_i   x19
146    b       c_handle_interrupt
147END_FUNC(cur_el_irq)
148
/*
 * Synchronous exception from a lower EL (user level or, with
 * hypervisor support, a guest).  Save the thread's full context,
 * decode the exception class from ESR and dispatch to the matching C
 * handler.  Each path records the faulting PC in the thread's frame
 * before switching to the kernel stack, because lsp_i repoints sp.
 */
BEGIN_FUNC(lower_el_sync)
    kernel_enter

    /* Read esr and branch to respective labels */
    mrs     x25, ESR
    lsr     x24, x25, #ESR_EC_SHIFT
    cmp     x24, #ESR_EC_LEL_DABT
    b.eq    lel_da
    cmp     x24, #ESR_EC_LEL_IABT
    b.eq    lel_ia
    cmp     x24, #ESR_EC_LEL_SVC64
    b.eq    lel_syscall
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    /* Guest hypercalls (HVC) are handled like SVC syscalls. */
    cmp     x24, #ESR_EC_LEL_HVC64
    b.eq    lel_syscall
    /* Any other class from a guest is a VCPU fault: record the
     * faulting PC and pass the raw ESR to the C handler. */
    mrs     x20, ELR
    str     x20, [sp, #PT_FaultInstruction]

    lsp_i   x19
    /* move the ESR as the input */
    mov     x0, x25
    b       c_handle_vcpu_fault
#else
    cmp     x24, #ESR_EL1_EC_ENFP
    b.eq    el0_enfp
    b       el0_user
#endif

lel_da:
    /* User-level data abort: record the faulting PC, switch stacks,
     * and let C resolve (or report) the fault. */
    mrs     x20, ELR
    str     x20, [sp, #PT_FaultInstruction]

    lsp_i   x19
    b       c_handle_data_fault

lel_ia:
    /* User-level instruction (prefetch) abort. */
    mrs     x20, ELR
    str     x20, [sp, #PT_FaultInstruction]

    lsp_i   x19
    b       c_handle_instruction_fault

lel_syscall:
    /* ELR points at the instruction after the SVC/HVC; step back one
     * 4-byte instruction so the recorded fault address is the syscall
     * instruction itself. */
    mrs     x20, ELR
    sub     x20, x20, #4
    str     x20, [sp, #PT_FaultInstruction]

    lsp_i   x19
    /* x7 is copied into x2 as an argument for c_handle_syscall.
     * NOTE(review): presumably x7 carries the syscall number per the
     * seL4 AArch64 syscall ABI — confirm against registerset.h. */
    mov     x2, x7
    b       c_handle_syscall

el0_enfp:
#ifdef CONFIG_HAVE_FPU
    /* FPU/SIMD access trap: hand over to the FPU-enable handler. */
    lsp_i   x19
    b       c_handle_enfp
#endif /* CONFIG_HAVE_FPU */
    /* Without CONFIG_HAVE_FPU this falls through to el0_user. */

el0_user:
    /* Undefined or otherwise unhandled user-level instruction. */
    mrs     x20, ELR
    str     x20, [sp, #PT_FaultInstruction]

    lsp_i   x19
    b       c_handle_undefined_instruction
END_FUNC(lower_el_sync)
213
/*
 * Interrupt from a lower EL: save the thread's full context and the
 * interrupted PC, then switch to the kernel stack and enter the C
 * interrupt handler.
 */
BEGIN_FUNC(lower_el_irq)
    kernel_enter
    mrs     x20, ELR
    str     x20, [sp, #PT_FaultInstruction]

    lsp_i   x19
    b       c_handle_interrupt
END_FUNC(lower_el_irq)
222