1/*
2 * Copyright (c) 2007-2016 ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#ifndef __ASSEMBLER__
11#define __ASSEMBLER__
12#endif // __ASSEMBLER__
13
14#include <asmoffsets.h> // OFFSETOF etc.
15#include <barrelfish_kpi/registers_arch.h> // CPSR_REG etc.
16#include <barrelfish_kpi/flags_arch.h> // CPSR_IF_MASK etc.
17#include <exceptions.h>
18#include <offsets.h>
19
20/*** Macros used in later routines. ***/
21
//
// Macro to initialize system mode stack.
// Assumes that the GOT pointer is set.
//
// Loads the address of this CPU driver's kernel stack through the GOT
// (the 'kernel_stack' entry), then points sp at the top of that stack
// (full-descending stack, so top = base + KERNEL_STACK_SIZE).
//
.macro init_sys_stack
    ldr sp, cpu_stack_got_offset            // sp = GOT offset of kernel_stack
    ldr sp, [PIC_REGISTER, sp]              // sp = &kernel_stack (via GOT)
    add sp, sp, #KERNEL_STACK_SIZE          // sp = top of the stack
.endm
31
// Macro definition to get pointer to arch-specific dispatcher
//
// Logical equivalent of C function with same name
//
// Result:  \out = dcb_current->disp (pointer to struct dispatcher_shared_arm)
// Clobber: \tmp (used to hold the GOT base)
//
.macro get_dispatcher_shared_arm out tmp
    load_got \tmp
    ldr      \out, dcb_current_idx          // out = GOT offset of dcb_current
    ldr      \out, [\tmp, \out]             // out = &dcb_current
    ldr      \out, [\out]        // out = dcb_current
    ldr      \out, [\out, #OFFSETOF_DCB_DISP] //now ptr to dispatcher_shared_arm
.endm
43
//
// Macro to determine if dispatcher is disabled.
//
// pc and disp arguments are unmodified.
// out contains result: 1 if the dispatcher must be treated as disabled
// (explicitly disabled, or pc inside the critical section), else 0.
//
.macro disp_is_disabled disp, pc, out
    // disp->disabled || (disp->crit_pc_lo <= pc && pc < disp->crit_pc_hi)
    ldrb    \out, [\disp, #OFFSETOF_DISP_DISABLED]
    cmp     \out, #1
    bhs     0f                      // disp->disabled >= 1      | disabled
                                    // disp->disabled = false
    ldr     \out, [\disp, #OFFSETOF_DISP_CRIT_PC_LOW]
    cmp     \out, \pc
    movhi   \out, #0
    bhi     0f                      // disp->crit_pc_low > pc   | enabled
    ldr     \out, [\disp, #OFFSETOF_DISP_CRIT_PC_HIGH]
    cmp     \pc, \out
    movhs   \out, #0                // pc >= disp->crit_pc_high | enabled
    movlo   \out, #1                // pc <  disp->crit_pc_high | disabled
0:
.endm
66
//
// Macro to spill registers
//
// Assumptions:
//      - context is in scratch registers set {r0-r3}.
//      - spsr is also in scratch register set.
//      - stack holds spilled scratch registers.
//      - lr contains pc for context
// Side-effects:
//      - spills context
//      - pops scratch registers off stack (sp -> sp + 16).
//      - trashes spsr_reg
//
.macro save_context context, spsr_reg
    // Build-time invariants: the code below hard-codes CPSR at word 0 and
    // PC at word 16 of the save area.
    .if     CPSR_REG <> 0
    .err    "Invariant failure: CPSR offset != 0"
    .endif
    .if     PC_REG <> 16
    .err    "Invariant failure: PC offset != 16"
    .endif
    str     \spsr_reg, [\context, #(CPSR_REG * 4)]  // save interrupted cpsr
    str     lr, [\context, #(PC_REG * 4)]           // save interrupted pc
    add     \spsr_reg, \context, #(R4_REG * 4)
    stmia   \spsr_reg!, {r4-r14}^           // save user-mode r4-r14
    // NOTE(review): STM of user-mode registers ('^') together with base
    // writeback ('!') is architecturally UNPREDICTABLE on ARMv7 — this
    // relies on the targeted cores performing the writeback; confirm when
    // porting to new silicon.
    add     \spsr_reg, #4                   // skip the PC slot (saved above)
    vmrs    r4, fpscr
    str     r4, [\spsr_reg], #4             // save FPSCR
    vstmia  \spsr_reg!, {d0-d15}            // save VFP/NEON registers
    vstmia  \spsr_reg!, {d16-d31}
    add     \spsr_reg, \context, #(R3_REG * 4)
    pop     {r4-r7}                         // Pop spilled scratch registers
    stmda   \spsr_reg!, {r4-r7}             // And Save them
                                            // (stmda puts the lowest register
                                            // at the lowest address, so the
                                            // popped r0-r3 land in the
                                            // R0..R3 slots of the context)
.endm
100
//
// Macro to initialize SVC pic register
//
// Fetches the GOT base address from the PL1-only thread ID register
// (TPIDRPRW), where boot code stashed it, into \reg.
//
.macro load_got reg
    // Read the PL1 thread ID register, where we stored the GOT address on
    // boot.
    mrc     p15, 0, \reg, c13, c0, 4
.endm
109
//
// Macro to enter SYS mode with interrupts disabled.
// Set up stack and GOT pointer.
//
// \scratch is trashed.  Note that sp and lr of the exception mode we are
// leaving are banked: anything still needed from them must have been moved
// into r0-r12 before invoking this macro.
//
.macro enter_sys scratch
    clrex                                   // clear exclusive-monitor state
    mov     \scratch, #(CPSR_IF_MASK | ARM_MODE_SYS)
    msr     cpsr_c, \scratch                // SYS mode, IRQ and FIQ masked
    load_got PIC_REGISTER
    init_sys_stack
.endm
121
122/*** From here, this is one contiguous block of code. ***/
123
    .arm
    /* The vector table and handler stubs are linked together, at a 4k-aligned
     * address, so that they can be remapped to the high vector address. */
    .section .text.vectors

    .globl exception_vectors

/*** The exception vector table. ***/

/* This needs to be at the beginning of a 4k frame, that we'll map to the high
 * vectors address.  It also needs to long jump, as it will be jumping down
 * into the regular kernel window.  As there's no room to load the GOT in the
 * vector table itself, the handler stubs are linked in the same 4k frame, so
 * that we can reach them with a short jump.  They then load the GOT base, and
 * long jump into the C handlers. */

 .align 12
exception_vectors:
    /* Reset - not expected once booted; spin forever. */
    b .
    /* Undefined instruction */
    b undef_handler
    /* System call (SVC/SWI) */
    b swi_handler
    /* Prefetch abort */
    b pabt_handler
    /* Data abort */
    b dabt_handler
    /* Hypervisor trap - unused; spin forever. */
    b .
    /* IRQ */
    b irq_handler
    /* FIQ */
    b fiq_handler
158
159/*** The exception handlers. ***/
160
    .align 2

/* Different instances of the CPU driver will have their own stacks.  On the
 * BSP core, this is initialised in bsp_start, to the bsp kernel stack
 * allocated alongside the first CPU driver image. */
    .type cpu_stack_got_offset, STT_OBJECT
cpu_stack_got_offset:
    .word kernel_stack(GOT)


/* The GOT offset of dcb_current. */
    .type dcb_current_idx, STT_OBJECT
dcb_current_idx:
    .word dcb_current(GOT)

/* The vector table above uses short jumps to reach these, so they must also
 * fit inside the 4kB high vectors page at 0xfffff000. */

/* These are the GOT offsets of the C handler functions, to which we've now
 * got to long jump. */

    /* Unrecoverable fault taken in kernel mode. */
    .type got_fatal_kernel, STT_OBJECT
got_fatal_kernel:
    .word fatal_kernel_fault(GOT)

    /* Undefined instruction in user mode. */
    .type got_user_undef, STT_OBJECT
got_user_undef:
    .word handle_user_undef(GOT)

    /* System call from user space. */
    .type got_sys_syscall, STT_OBJECT
got_sys_syscall:
    .word sys_syscall(GOT)

    /* System call issued while already in kernel mode. */
    .type got_syscall_kernel, STT_OBJECT
got_syscall_kernel:
    .word sys_syscall_kernel(GOT)

    /* User-mode prefetch/data abort (page fault). */
    .type got_page_fault, STT_OBJECT
got_page_fault:
    .word handle_user_page_fault(GOT)

    /* IRQ taken from user mode. */
    .type got_handle_irq, STT_OBJECT
got_handle_irq:
    .word handle_irq(GOT)

    /* IRQ taken in kernel mode (wfi loop). */
    .type got_kernel_irq, STT_OBJECT
got_kernel_irq:
    .word handle_irq_kernel(GOT)

    /* FIQ taken from user mode. */
    .type got_handle_fiq, STT_OBJECT
got_handle_fiq:
    .word handle_fiq(GOT)

    /* FIQ taken in kernel mode. */
    .type got_kernel_fiq, STT_OBJECT
got_kernel_fiq:
    .word handle_fiq_kernel(GOT)
217
//
// void undef_handler(void)
//
// Entered in UNDEF mode, IRQ disabled, ARM state.
//
// NB Identical to PABT except for final jump in undef_user and
// code doesn't adjust lr to point to faulting instruction since
// it was undefined and there's no point re-executing it.
//
undef_handler:
    stmfd   sp!, {r0-r3}                    // Save for scratch use
    mrs     r3, spsr                        // r3 = spsr until save_context
    ands    r1, r3, #ARM_MODE_PRIV          // were we in a privileged mode?
    bne     $undef_kernel
$undef_user:
    get_dispatcher_shared_arm r2 r0         // r2 = dispatcher_shared_arm
    sub     r0, lr, #4                      // r0 = faulting pc
    disp_is_disabled r2, r0, r1             // r1 = 1 if disabled, else 0
    cmp     r1, #0
    addeq   r1, r2, #OFFSETOF_DISP_ENABLED_AREA  // enabled  -> enabled area
    addne   r1, r2, #OFFSETOF_DISP_TRAP_AREA     // disabled -> trap area
    save_context r1, r3                     // r1 = save area
    enter_sys r3
    ldr r3, got_user_undef
    ldr pc, [PIC_REGISTER, r3]              // f(fault_addr, save_area)
$undef_kernel:
    // Undefined instruction in the kernel itself: fatal.
    sub     r2, sp, #(NUM_REGS * 4)         // Save to stack
    save_context r2, r3                     // r2 = saved context
    sub     r1, lr, #4                      // r1 = fault address
    mov     r0, #ARM_EVECTOR_UNDEF
    enter_sys r3
    ldr r3, got_fatal_kernel
    ldr pc, [PIC_REGISTER, r3]              // f(evector, addr, save_area)
251
//
// void swi_handler(void)
//
// Entered in SVC mode, IRQ disabled, ARM state.
//
// r0 = encoded syscall ordinal
// r1 = syscall arg0
// r2 = syscall arg1
// ...
// r7 = syscall arg6
//
// For now the system saves the caller's context here, because
// some fraction of system calls do not return directly.
//
swi_handler:
    // Build-time check: the dispatch below assumes the ordinal is in r0.
    .if SYSCALL_REG <> 0
    .error "Syscall entry broken. Expected ordinal reg to be r0."
    .endif

    // Are we in kernel mode or not?
    stmfd   sp!, {r0-r3}                    // Save for scratch use
    mrs     r3, spsr                        // r3 = spsr until save_context
    ands    r1, r3, #ARM_MODE_PRIV
    bne     $swi_kernel
$swi_user:
    // System call from user space.  Save state.
    get_dispatcher_shared_arm r2 r0
    disp_is_disabled r2, lr, r1             // r1 = 1 if disabled, else 0
    cmp     r1, #0
    addeq   r0, r2, #OFFSETOF_DISP_ENABLED_AREA
    addne   r0, r2, #OFFSETOF_DISP_DISABLED_AREA
    save_context r0, r3                     // r0 = save area, r3 = scratch
    enter_sys r3
    // Removing these two instructions: they don't do anything
    // useful.
    //  ldr     r11, [r0, #48]                  // context->fp
    //  ldr     lr, [r0, #60]                   // context->lr
    // Now we call sys_syscall:
    // __attribute__((noreturn))
    // void sys_syscall(arch_registers_state_t* context,
    //                  uint32_t disabled,
    //                  struct dispatcher_shared_generic *disp);
    //  r0  = address of area context was saved to
    //  r1  = 0 if not disabled, != 0 if disabled
    //  r2  = kernel address of dispatcher
    //  r3  = scratch value
    ldr r3, got_sys_syscall
    ldr pc, [PIC_REGISTER, r3]
$swi_kernel:
    // SVC issued while already in kernel mode; scratch regs stay spilled.
    ldr r3, got_syscall_kernel
    ldr pc, [PIC_REGISTER, r3]
303
//
// void pabt_handler(void)
//
// Entered in ABT mode, IRQ disabled, ARM state.
//
pabt_handler:
    stmfd   sp!, {r0-r3}                    // Save for scratch use
    sub     lr, lr, #4                      // lr = faulting pc (PABT lr is
                                            // faulting instruction + 4)
    mrs     r3, spsr                        // r3 = spsr until save_context
    ands    r1, r3, #ARM_MODE_PRIV          // were we in a privileged mode?
    bne     $pabt_kernel
$pabt_user:
    get_dispatcher_shared_arm r2 r0         // r2 = dispatcher_shared_arm
    mov     r0, lr                          // r0 = faulting pc
    disp_is_disabled r2, r0, r1             // r1 = 1 if disabled, else 0
    cmp     r1, #0
    addeq   r1, r2, #OFFSETOF_DISP_ENABLED_AREA  // enabled  -> enabled area
    addne   r1, r2, #OFFSETOF_DISP_TRAP_AREA     // disabled -> trap area
    save_context r1, r3                     // r1 = save area
    enter_sys r3
    ldr r3, got_page_fault
    ldr pc, [PIC_REGISTER, r3]              // f(fault_addr, save_area)
$pabt_kernel:
    // Prefetch abort in the kernel itself: fatal.
    // {r0-r3} spilled to stack
    sub     r2, sp, #(NUM_REGS * 4)         // Reserve stack space for save
    save_context r2, r3                     // r2 = save_area
    mov     r1, lr                          // r1 = faulting pc
    mov     r0, #ARM_EVECTOR_PABT
    enter_sys r3
    ldr r3, got_fatal_kernel
    ldr pc, [PIC_REGISTER, r3]              // f(evector, addr, save_area)
335
//
// void dabt_handler(void)
//
// Entered in ABT mode, IRQ disabled, ARM state.
//
dabt_handler:
    stmfd   sp!, {r0-r3}                    // Save for scratch use
    sub     lr, lr, #8                      // lr = faulting instruction (DABT
                                            // lr is faulting instruction + 8)
    mrs     r3, spsr                        // r3 = spsr until save_context
    ands    r1, r3, #ARM_MODE_PRIV          // were we in a privileged mode?
    bne     $dabt_kernel
$dabt_user:
    get_dispatcher_shared_arm r2 r0         // r2 = dispatcher_shared_arm
    mov     r0, lr                          // r0 = faulting pc
    disp_is_disabled r2, r0, r1             // r1 = disp_is_disabled
    cmp     r1, #0
    addeq   r1, r2, #OFFSETOF_DISP_ENABLED_AREA  // enabled  -> enabled area
    addne   r1, r2, #OFFSETOF_DISP_TRAP_AREA     // disabled -> trap area
    save_context    r1, r3                  // r1 = save_area
    mrc     p15, 0, r0, c6, c0, 0           // r0 = fault address (DFAR)
    enter_sys r3
    ldr r3, got_page_fault
    ldr pc, [PIC_REGISTER, r3]              // f(fault_addr, save_area)
$dabt_kernel:
    // Data abort in the kernel itself: fatal.
    // {r0-r3} spilled to stack
    sub     r2, sp, #(NUM_REGS * 4)         // Reserve stack space for save
    save_context r2, r3                     // r2 = save_area
    mrc     p15, 0, r1, c6, c0, 0           // r1 = fault address (DFAR)
    mov     r0, #ARM_EVECTOR_DABT
    enter_sys r3
    ldr r3, got_fatal_kernel
    ldr pc, [PIC_REGISTER, r3]              // f(evector, addr, save_area)
368
//
// void irq_handler(void)
//
// Entered in IRQ mode, IRQ disabled, ARM state
//
irq_handler:
    stmfd   sp!, {r0-r3}                    // Save for scratch use
    sub     lr, lr, #4                      // lr = return address
    mrs     r3, spsr                        // r3 = spsr until save_context
    ands    r1, r3, #ARM_MODE_PRIV          // were we in a privileged mode?
    bne     $irq_kernel
$irq_user:
    get_dispatcher_shared_arm r2 r1         // r2 = cur_dcb->disp
    mov     r1, lr                          // r1 = return address
    disp_is_disabled r2, r1, r0             // r0 = 1 if disabled, else 0
    cmp     r0, #0
    addeq   r0, r2, #OFFSETOF_DISP_ENABLED_AREA
    addne   r0, r2, #OFFSETOF_DISP_DISABLED_AREA
    save_context    r0, r3                  // r0 = save area
    enter_sys       r3                      // r1 still = return address
    // Call: void handle_irq(arch_registers_state_t* save_area,
    //                       uintptr_t fault_pc,
    //                       struct dispatcher_shared_generic *disp)
    //     __attribute__((noreturn));
    ldr r3, got_handle_irq
    ldr pc, [PIC_REGISTER, r3]              // f(save_area, fault_pc)
$irq_kernel:
    // IRQs in the kernel only occur in the wfi loop, and we don't really care
    // about the register context.
    // NOTE(review): r1 (the fault_pc argument) is not set on this path — it
    // still holds the mode bits from the 'ands' above; confirm that
    // handle_irq_kernel ignores its fault_pc argument (cf. $fiq_kernel,
    // which does capture lr into r1).
    mov r0, #0                              // save_area = NULL
    add sp, sp, #16                         // Discard scratch registers
    enter_sys r3
    // Call: void handle_irq_kernel(arch_registers_state_t* NULL,
    //                              uintptr_t fault_pc,
    //                              struct dispatcher_shared_generic *disp)
    //     __attribute__((noreturn));
    ldr r3, got_kernel_irq
    ldr pc, [PIC_REGISTER, r3]              // f(save_area, fault_pc)
407
//
// void fiq_handler(void)
//
// Entered in FIQ mode, IRQ disabled, ARM state
//
fiq_handler:
    stmfd   sp!, {r0-r3}                    // Save for scratch use
    sub     lr, lr, #4                      // lr = return address
    mrs     r3, spsr                        // r3 = spsr until save_context
    ands    r1, r3, #ARM_MODE_PRIV          // were we in a privileged mode?
    // BUGFIX: this previously branched to $irq_kernel, which made the
    // $fiq_kernel path below unreachable and delivered kernel-mode FIQs to
    // handle_irq_kernel (losing the faulting pc).  Dispatch to the FIQ
    // kernel path instead.
    bne     $fiq_kernel
$fiq_user:
    get_dispatcher_shared_arm r2 r1         // r2 = cur_dcb->disp
    mov     r1, lr                          // r1 = return address
    disp_is_disabled r2, r1, r0             // r0 = 1 if disabled, else 0
    cmp     r0, #0
    addeq   r0, r2, #OFFSETOF_DISP_ENABLED_AREA
    addne   r0, r2, #OFFSETOF_DISP_DISABLED_AREA
    save_context    r0, r3                  // r0 = save area
    enter_sys       r3                      // r1 still = return address
    mov     lr, #0                          // clear lr and fp so backtraces
    mov     r11, #0                         // terminate here
    // Call: void handle_fiq(arch_registers_state_t* save_area,
    //                       uintptr_t fault_pc,
    //                       struct dispatcher_shared_generic *disp)
    //     __attribute__((noreturn));
    ldr r3, got_handle_fiq
    ldr pc, [PIC_REGISTER, r3]              // f(save_area, fault_pc)
$fiq_kernel:
    // CPU was in System mode.
    mov r0, #0                              // save_area = NULL
    add sp, sp, #16                         // Discard scratch registers
    mov r1, lr                              // r1 = faulting pc; lr is banked,
                                            // so capture it before enter_sys
    enter_sys       r3
    // Call: void handle_fiq_kernel(arch_registers_state_t* save_area,
    //                              uintptr_t fault_pc)
    //     __attribute__((noreturn));
    ldr r3, got_kernel_fiq
    ldr pc, [PIC_REGISTER, r3]              // f(save_area, fault_pc)
447
    .global do_resume
// do_resume(arch_registers_state_t *regs) — restore a full user context and
// perform an exception return.  r0 = pointer to the save area, laid out as:
// word 0 = CPSR, words 1..16 = r0-r15, word 17 = FPSCR, then d0-d31.
do_resume:
    clrex
    // There is no SPSR in system mode, so switch to supervisor.
    msr    CPSR_c, #(CPSR_IF_MASK | ARM_MODE_SVC)
    // Load cpsr into LR and move regs to next entry (postindex op)
    // LR = r14, used as scratch register.
    // LDR = read word from memory
    //        target register
    //        /   use register containing "regs" as base register
    //       /   /     post index: only base register is used for
    //      /   /     /   addressing and the offset added afterwards
    ldr    lr, [r0], #4
    // set SPSR to value of lr == regs.cpsr
    // restore cpsr
    //        bits indicating SPSR
    //       /      read from register lr
    //      /      /
    msr    spsr, lr
    // Restore VFP state: r0 now points at the r0 slot, so the FPSCR word
    // sits 16 words (r0-r15) further on, followed by d0-d31.
    add   r1, r0, #16*4
    ldr   r2, [r1], #4
    vmsr  fpscr, r2
    vldmia r1!, {d0-d15}
    vldmia r1, {d16-d31}
    // Restore register r0 to r15, "^" means: cpsr := spsr
    // Restore the non-banked registers.  Use LR as the index.
    mov    lr, r0
    //          will increment the base pointer
    //         /
    ldmia  lr!, {r0-r12}                    // lr now points at the r13 slot
    // Restore the user stack pointer and link register.  n.b. LR is
    // banked in SVC mode, so *our* LR isn't affected.  Also, this can't
    // write back, so we've got to add the offset ourselves.
    ldmia  lr, {r13, r14}^
    // Load the (banked SVC) LR with the return address (add the offset
    // that the last ldmia couldn't): PC slot = r13 slot + 8 bytes.
    ldr    lr, [lr, #8]
    // Exception return - LR_svc -> PC_usr, SPSR_svc -> CPSR
    movs pc, lr
488
489/* Any load targets for the instructions above must be within the same 4k
490 * page, so we flush constants here to make sure. */
491    .ltorg
492