/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
6
7#include <config.h>
8#include <machine/assembler.h>
9#include <arch/machine/hardware.h>
10#include <arch/machine/registerset.h>
11#include <util.h>
12
/*
 * Alignment-check policy for SCTLR. By default unaligned data accesses
 * are permitted (the A bit is placed in the "clear" mask); building with
 * ALLOW_UNALIGNED_ACCESS defined as 0 sets the A bit instead, so that
 * unaligned accesses fault.
 */
#ifndef ALLOW_UNALIGNED_ACCESS
#define ALLOW_UNALIGNED_ACCESS 1
#endif

#if ALLOW_UNALIGNED_ACCESS
/* Leave SCTLR.A clear: unaligned accesses allowed. */
#define CR_ALIGN_SET     0
#define CR_ALIGN_CLEAR   BIT(CONTROL_A)
#else
/* Set SCTLR.A: unaligned accesses trap. */
#define CR_ALIGN_SET     BIT(CONTROL_A)
#define CR_ALIGN_CLEAR   0
#endif
24
/*
 * L1 instruction-cache enable bit (SCTLR.I). Normally set; the debug
 * option CONFIG_DEBUG_DISABLE_L1_ICACHE moves it to the "clear" mask.
 */
#ifndef CONFIG_DEBUG_DISABLE_L1_ICACHE
    #define CR_L1_ICACHE_SET   BIT(CONTROL_I)
    #define CR_L1_ICACHE_CLEAR 0
#else
    #define CR_L1_ICACHE_SET   0
    #define CR_L1_ICACHE_CLEAR BIT(CONTROL_I)
#endif

/*
 * L1 data-cache enable bit (SCTLR.C). Normally set; the debug option
 * CONFIG_DEBUG_DISABLE_L1_DCACHE moves it to the "clear" mask.
 */
#ifndef CONFIG_DEBUG_DISABLE_L1_DCACHE
    #define CR_L1_DCACHE_SET   BIT(CONTROL_C)
    #define CR_L1_DCACHE_CLEAR 0
#else
    #define CR_L1_DCACHE_SET   0
    #define CR_L1_DCACHE_CLEAR BIT(CONTROL_C)
#endif
40
/*
 * Bits OR-ed into SCTLR at boot: the configured alignment policy, the
 * L1 caches (unless disabled for debugging above), and the MMU enable
 * (CONTROL_M).
 */
#define CR_BITS_SET    (CR_ALIGN_SET | \
                        CR_L1_ICACHE_SET | \
                        CR_L1_DCACHE_SET | \
                        BIT(CONTROL_M))

/*
 * Bits cleared from SCTLR at boot: the complementary alignment/cache
 * policy bits, plus SA0 (EL0 SP-alignment check off) and the EE/E0E
 * endianness bits (little-endian data accesses at EL1/EL0).
 */
#define CR_BITS_CLEAR  (CR_ALIGN_CLEAR | \
                        CR_L1_ICACHE_CLEAR | \
                        CR_L1_DCACHE_CLEAR | \
                        BIT(CONTROL_SA0) | \
                        BIT(CONTROL_EE) | \
                        BIT(CONTROL_E0E))
52
53/*
54 * Entry point of the kernel ELF image.
55 * X0-X5 contain parameters that are passed to init_kernel().
56 *
57 * Note that for SMP kernel, the tpidr_el1 is used to pass
58 * the logical core ID.
59 */
60
/*
 * Select the system-control register for the exception level the kernel
 * runs at: sctlr_el2 when built with hypervisor (EL2) support,
 * sctlr_el1 otherwise.
 */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
#define SCTLR   sctlr_el2
#else
#define SCTLR   sctlr_el1
#endif
66
.section .boot.text
BEGIN_FUNC(_start)
    /* Save x4 and x5 in callee-unused scratch regs so the calls below
       don't clobber them; they are restored before init_kernel. */
    mov     x7, x4
    mov     x8, x5

    /* Make sure interrupts are disabled (mask D, A, I and F). */
    msr daifset, #DAIFSET_MASK

    /* Initialise sctlr_el1 or sctlr_el2 register: select SP_ELx as the
       stack pointer, then apply the boot-time set/clear masks built
       from CR_BITS_SET and CR_BITS_CLEAR above. */
    msr     spsel, #1
    mrs     x4, SCTLR
    ldr     x19, =CR_BITS_SET
    ldr     x20, =CR_BITS_CLEAR
    orr     x4, x4, x19
    bic     x4, x4, x20
    msr     SCTLR, x4

#ifdef ENABLE_SMP_SUPPORT
    /* tpidr_el1 has the logical ID of the core, starting from 0 */
    mrs     x6, tpidr_el1
    /* Set the sp for each core assuming linear indices: each core gets
       its own BIT(CONFIG_KERNEL_STACK_BITS)-sized slice, and sp points
       at the top of that slice. */
    ldr     x5, =BIT(CONFIG_KERNEL_STACK_BITS)
    mul     x5, x5, x6
    ldr     x4, =kernel_stack_alloc + BIT(CONFIG_KERNEL_STACK_BITS)
    add     x4, x4, x5
    mov     sp, x4
    /* the kernel stack must be 4-KiB aligned since we use the
       lowest 12 bits to store the logical core ID. */
    orr     x6, x6, x4
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    msr     tpidr_el2, x6
#else
    msr     tpidr_el1, x6
#endif
#else
    /* Single-core: sp is simply the top of the kernel stack. */
    ldr    x4, =kernel_stack_alloc + BIT(CONFIG_KERNEL_STACK_BITS)
    mov    sp, x4
#endif /* ENABLE_SMP_SUPPORT */

    /* Attempt to workaround any known ARM errata. Spill the boot
       parameters first (three 16-byte pairs keep sp 16-byte aligned);
       the final ldp pops the stashed x7/x8 copies back into x4/x5. */
    stp     x0, x1, [sp, #-16]!
    stp     x2, x3, [sp, #-16]!
    stp     x7, x8, [sp, #-16]!
    bl arm_errata
    ldp     x4, x5, [sp], #16
    ldp     x2, x3, [sp], #16
    ldp     x0, x1, [sp], #16

    /* Call bootstrapping implemented in C, with the original x0-x5. */
    bl      init_kernel

    /* Restore the initial thread; does not return. */
    b restore_user_context
END_FUNC(_start)
122