/*
 * Copyright 2013, winocm. <winocm@icloud.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *   Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 *   Redistributions in binary form must reproduce the above copyright notice, this
 *   list of conditions and the following disclaimer in the documentation and/or
 *   other materials provided with the distribution.
 *
 *   If you are going to use this software in any form that does not involve
 *   releasing the source to this project or improving it, let me know beforehand.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * ARM system startup
 */

33#include <arm/arch.h>
34#include <arm/asm_help.h>
35#include <assym.s>
36#include <mach/arm/asm.h>
37#include <arm/PlatformConfigs.h>

/*
 * During system initialization, there are two possible methods of
 * initial bootstrap.
 *
 * The old BootKit loader prepared the initial virtual memory mappings
 * for the kernel. When we boot using a shim loader, we don't get this
 * luxury, so we have to do it ourselves. Isn't that quite fun?
 *
 *  - r0 = kernel boot-args structure.
 *
 * The boot-args structure will be updated to then be virtual.
 */

52EnterARM(_start)
53    /* First, disable interrupts so that the BL doesn't get any. */
54    LOAD_ADDR(lr, arm_init)
55    cpsid   if
56
57    /* If MMU is initialized, go the quick way. */
58    mrc     p15, 0, r4, c1, c0, 0
59    and     r4, #0x1
60    cmp     r4, #0x1
61    beq     mmu_initialized
62
63mmu_reinitialize:
64    /*
65     * MMU initialization part begins here. -----------------------
66     *
67     * Basically, all of SDRAM gets remapped to the virtual base.
68     */
69
70    /* Adjust DACR register. */
71    mov     r4, #0x1
72
73    mcr     p15, 0, r4, c3, c0, 0
74#ifdef _ARM_ARCH_7
75    isb     sy
76#endif
77
78    /* Clean TLB and instruction cache. */
79    mov     r4, #0
80    mcr     p15, 0, r4, c8, c7, 0
81    mcr     p15, 0, r4, c7, c5, 0
82    mcr     p15, 0, r4, c2, c0, 2
83
84    /*
85     * Create a dumb mapping for right now. This mapping lies
86     * at the top of kernel data.
87     */
88    ldr     r4, [r0, BOOT_ARGS_TOP_OF_KERNEL]
89    ldr     r10, [r0, BOOT_ARGS_VIRTBASE]
90    ldr     r11, [r0, BOOT_ARGS_PHYSBASE]
91
92    /* Is it bootArgs revision 3? */
93    ldrh    r12, [r0, BOOT_ARGS_VERSION]
94    cmp     r12, #3
95
96    /* Align the memory size to 1MB for compatibility. */
97    ldreq   r5, [r0, BOOT_ARGS_MEMSIZE]
98    andeq   r5, r5, #0xFFF000000
99    streq   r5, [r0, BOOT_ARGS_MEMSIZE]
100
101    /* Load memory size value after fixup. */
102    ldr     r12, [r0, BOOT_ARGS_MEMSIZE]
103
104    /* MMU cacheability value. */
105    orr     r5, r4, #0x18
106
107    /* Now, we have to set our TTB to this value. */
108    mcr     p15, 0, r5, c2, c0, 0
109
110    /* Make our section mappings now. */
111    mov     r6, #0xE            /* This is a section descriptor */
112    orr     r6, r6, #0x400      /* Permissions */
113
114    /* Identity map UART for right now */
115    LoadConstantToReg((0x7f600000), r7)
116    mov     r7, r7, lsr#20
117    add     r5, r4, r7, lsl#2
118    mov     r7, r7, lsl#20
119    orr     r8, r7, r6
120    str     r8, [r5]
121
122    mov     r7, pc, lsr#20
123    add     r5, r4, r7, lsl#2   /* Make the TTE offset */
124
125    /* God, I hope we're loaded at the beginning of SDRAM. */
126    mov     r7, r7, lsl#20
127    orr     r8, r7, r6
128
129    /* Store our section mappings. */
130    str     r8, [r5]
131
132    /* Get the physical address... */
133    mov     r1, r11
134    add     r5, r4, r10, lsr#18
135map:
136    /* Just map all of SDRAM. */
137    orr     r8, r1, r6
138    str     r8, [r5], #4
139    add     r1, r1, #_1MB
140    subs    r12, r12, #_1MB
141    bne     map
142
143    /* Start MMU. */
144    mrc     p15, 0, r3, c1, c0, 0
145    orr     r3, r3, #1
146    mcr     p15, 0, r3, c1, c0, 0
147
148    /*
149     * Hopefully, if we got here, things are looking good and we
150     * are running in VM mode.
151     */
152
153     /*
154      * xxx KASLR: we need to jump to a trampoline.
155      * The address in r3 is relative, we convert it to a KVA and jump.
156      */
157    adr     r3, start_trampoline
158    sub     r3, r3, r11
159    add     r3, r3, r10
160    bx      r3
161start_trampoline:
162    nop
163
164fix_boot_args_hack_for_bootkit:
165    /* Fix up boot-args */
166    sub     r0, r0, r11
167    add     r0, r0, r10
168
169    /* Goddamn section offset. */
170    LOAD_ADDR(r12, sectionOffset)
171    mov     sp, #0
172    str     sp, [r12]
173
174#ifdef _ARM_ARCH_7
175    /*
176     * VBAR Note:
177     * The exception vectors are mapped high also at 0xFFFF0000 for compatibility purposes.
178     */
179
180    /* Set low vectors. */
181    mrc     p15, 0, r4, c1, c0, 0
182    bic     r4, r4, #(1 << 13)
183    mcr     p15, 0, r4, c1, c0, 0
184
185    /* Set NS-VBAR to ExceptionVectorsBase */
186    LOAD_ADDR(r4, ExceptionVectorsBase)
187    mcr     p15, 0, r4, c12, c0, 0
188
189#else
190
191    /* Now, the vectors could be mapped low. Fix that. */
192    mrc     p15, 0, r4, c1, c0, 0
193    orr     r4, r4, #(1 << 13)
194    mcr     p15, 0, r4, c1, c0, 0
195
196#endif
197
198    /*
199     * MMU initialization end. ------------------------------------
200     */
201
202mmu_initialized:
203    /*
204     * Zero out the frame pointer so that the kernel fp tracer
205     * doesn't go farther than it needs to.
206     */
207    mov     r7, #0
208
209#if __ARM_PROCESSOR_CLASS_CORTEX_A9__
210    /* Enable automatic-clock gating. */
211    mrc     p15, 0, r4, c15, c0, 0
212    orr     r4, r4, #1
213    mcr     p15, 0, r4, c15, c0, 0
214#endif
215
216    /* Enable unaligned memory access and caching */
217    mrc     p15, 0, r4, c1, c0, 0
218    orr     r4, r4, #(1 << 22)  /* Force unaligned accesses, fixes OMAP boot. */
219    bic     r4, r4, #(1 << 1)
220    orr     r4, r4, #(1 << 23)  /* Unaligned memory access */
221    orr     r4, r4, #(1 << 12)  /* Enable I-cache */
222    mcr     p15, 0, r4, c1, c0, 0
223
224    /* Invalid Data/Inst TLB */
225    mov     r4, #0
226    mcr     p15, 0, r4, c8, c7, 0
227
228    /* Invalidate caches */
229    mcr     p15, 0, r4, c7, c5, 0
230
231    /* Set CONTEXIDR to 0, kernel ASID. */
232    mcr     p15, 0, r4, c13, c0, 1
233
234    /* Set up initial sp. */
235    LOAD_ADDR(sp, intstack_top)
236
237    /* Boot to ARM init. */
238    bx      lr
239
240/**
241 * sleep_test
242 */
243EnterARM(sleep_test)
244    /* Get physical base. */
245    ldr     r8, [r1, BOOT_ARGS_PHYSBASE]
246    ldr     r9, [r1, BOOT_ARGS_VIRTBASE]
247    ldr     r4, [r1, BOOT_ARGS_TOP_OF_KERNEL]
248
249    /* Set new page tables. (kernel bootstrap page table) */
250    orr     r6, r4, #0x18
251    mcr     p15, 0, r6, c2, c0, 0
252    mcr     p15, 0, r6, c2, c0, 1
253
254    sub     r4, r4, r8
255    add     r4, r4, r9
256
257    /* Create boot page table entry for trampoline. */
258    ldr     r10, [r1, BOOT_ARGS_MEMSIZE]
259    mov     r6, #0xE
260    mov     r1, #1
261    orr     r6, r6, r1, lsl#10
262    add     r5, r4, r8, lsr#18
263    orr     r11, r8, r6
264    str     r11, [r5]
265
266    /* Clear unified TLB */
267    mov     r1, #0
268    mcr     p15, 0, r1, c8, c7, 0
269#ifdef _ARM_ARCH_7
270    isb     sy
271#endif
272
273    /* Clear MMU-EN bit in SCTLR */
274    mrc     p15, 0, r11, c1, c0, 0
275    bic     r11, r11, #1
276#ifdef _ARM_ARCH_7
277    isb     sy
278#endif
279
280    /* Jump to physical trampoline. */
281    adr     r4, sleep_tramp
282    sub     r4, r4, r9
283    add     r4, r4, r8
284    bx      r4
285sleep_tramp:
286    cpsid   if, #0x13
287    mcr     p15, 0, r11, c1, c0, 0
288    nop
289    nop
290    nop
291    nop
292    bx      r2
293
294
295/*
296 * Initial stack
297 */
298.data
299.align 4
300.globl _intstack_top
301.globl _intstack
302_intstack:
303.space (8192), 0
304_intstack_top:
305
306/*
307 * ARM SMP stack.
308 */
309.globl _debstack_top
310.globl _debstack
311_debstack:
312.space (8192), 0
313_debstack_top:
314
315LOAD_ADDR_GEN_DEF(ExceptionVectorsBase)
316LOAD_ADDR_GEN_DEF(arm_init)
317LOAD_ADDR_GEN_DEF(intstack_top)
318LOAD_ADDR_GEN_DEF(intstack)
319LOAD_ADDR_GEN_DEF(sectionOffset)
320