/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte-v4.h>

__FBSDID("$FreeBSD: stable/11/sys/arm/arm/locore-v4.S 331890 2018-04-02 22:02:49Z gonzo $");

/* A 2K initial stack is plenty; it is only used by initarm(). */
#define INIT_ARM_STACK_SIZE	2048

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
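
/*
 * CPWAIT is the usual ARMv4/v5 (e.g. XScale) sequence for waiting until a
 * CP15 write has taken effect: the arbitrary CP15 read forces outstanding
 * coprocessor operations to complete, the mov consumes the result, and the
 * branch to the next instruction flushes the pipeline.
 */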

/*
 * This is for libkvm, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 *
 * These are being phased out. Newer copies of libkvm don't need these
 * values as the information is added to the core file by inspecting
 * the running kernel.
 */
	.text
	.align	2
#ifdef PHYSADDR
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR
#endif


/*
 * On entry for the FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for the Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both kinds of boot we gather up the args, put them in a
 * struct arm_boot_params, and pass that to initarm; see the layout
 * sketch below.
 */
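/*
 * For reference, the structure built on the stack at virt_done below is
 * expected to match this layout (a sketch of struct arm_boot_params from
 * sys/arm/include/machdep.h; offsets in bytes):
 *
 *	struct arm_boot_params {
 *		register_t	abp_size;	//  0: size of this structure
 *		register_t	abp_r0;		//  4: r0 from the boot loader
 *		register_t	abp_r1;		//  8: r1 from the boot loader
 *		register_t	abp_r2;		// 12: r2 from the boot loader
 *		register_t	abp_r3;		// 16: r3 from the boot loader
 *		vm_offset_t	abp_physaddr;	// 20: physical load address
 *		vm_offset_t	abp_pagetable;	// 24: early page table
 *	};	// sizeof == 28
 */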
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save machine type */
	mov	ip, r2		/* Save metadata pointer */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(PSR_I | PSR_F)
	msr	cpsr_c, r7

#if defined(FLASHADDR) && defined(LOADERRAMADDR)
/*
 * Sanity-check the configuration.
 * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
 * ARMv4 and ARMv5 make assumptions about where they are loaded.
 * TODO: Fix the ARMv4/v5 case.
 */
#ifndef PHYSADDR
#error PHYSADDR must be defined for this configuration
#endif

	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	CP15_SCTLR(r2)
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6			/* is flash below or above RAM? */
	bls	flash_lower
	cmp	r7, pc			/* flash above RAM: already in RAM? */
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc			/* flash below RAM: already in RAM? */
	bls	from_ram
do_copy:
	ldr	r7, =KERNBASE
	adr	r1, _start		/* r1 = physical source address */
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0		/* r2 = size of the kernel image */
	sub	r0, r0, r7		/* r0 = link offset of _start... */
	add	r0, r0, r6		/* ...rebased to RAM = destination */
	mov	r4, r0			/* save the destination */
	bl	memcpy			/* memcpy(dest, src, len) */
	ldr	r0, Lram_offset		/* offset of from_ram in the image */
	add	pc, r4, r0		/* jump into the copy in RAM */
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif

disable_mmu:
	/* Disable the MMU, caches, write buffer, and branch prediction. */
	mrc	CP15_SCTLR(r2)
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	CP15_SCTLR(r2)

	nop
	nop
	nop
	CPWAIT(r0)

Lunmapped:
	/*
	 * Build the page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at. This entry-point
	 * code is assumed to lie in the first L1 section of the image, so
	 * clearing the section-offset bits of the pc yields the
	 * section-aligned load address, which stays in r5 throughout the
	 * following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2
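	/*
	 * Illustrative example (values assumed, not from any real board):
	 * with L1_S_OFFSET == 0x000fffff, a pc of 0x00200468 here leaves
	 * r5 == 0x00200000.
	 */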

	/*
	 * Get the physical address of the page table; it stays in r0
	 * throughout what follows.
	 */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GiB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096		/* 4096 x 1MiB sections = 4GiB */
	bl	build_pagetables

	/*
	 * Next we map 64MiB starting at the physical load address to the VA
	 * the kernel is linked for.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	mov	r3, #64			/* 64 x 1MiB sections = 64MiB */
	bl	build_pagetables
#if defined(PHYSADDR) && (KERNVIRTADDR != KERNBASE)
/*
 * If the kernel wasn't loaded at the beginning of RAM, also map the memory
 * before the kernel, as some ports use it for page tables, the stack, etc.
 */
	ldr	r1, =PHYSADDR
	ldr	r2, =KERNBASE
	ldr	r3, =((KERNVIRTADDR - KERNBASE) / L1_S_SIZE)
	bl	build_pagetables
#endif

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif

	mcr	p15, 0, r0, c2, c0, 0	/* set the TTB to pa(pagetable) */
	mcr	p15, 0, r0, c8, c7, 0	/* flush the I+D TLBs */

	/*
	 * Set the Domain Access Control register.  Very important!  The
	 * kernel pmap domain and domain 0 are "client", so the access
	 * permission bits in the PTEs are actually checked.
	 */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable the MMU.
	 */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	CP15_SCTLR(r0)
	nop
	nop
	nop
	CPWAIT(r0)

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =mmu_done

mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* set initial sp; r1 = _edata, r2 = _ebss */
	sub	r2, r2, r1		/* r2 = number of bss bytes to zero */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero the bss */
	subs	r2, r2, #4
	bgt	.L1

virt_done:
	mov	r1, #28			/* loader info size is 28 bytes, also the second arg */
	subs	sp, sp, r1		/* allocate a struct arm_boot_params on the stack */
	mov	r0, sp			/* loader info pointer is the first arg */
	bic	sp, sp, #7		/* align the stack to 8 bytes */
	str	r1, [r0]		/* store the length of the loader info */
	str	r9, [r0, #4]		/* store r0 from the boot loader */
	str	r8, [r0, #8]		/* store r1 from the boot loader */
	str	ip, [r0, #12]		/* store r2 from the boot loader */
	str	fp, [r0, #16]		/* store r3 from the boot loader */
	str	r5, [r0, #20]		/* store the physical load address */
	adr	r4, Lpagetable		/* load the va/pa pointer for the pagetable */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* off we go */

	/* initarm() returns the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table

/*
 * Returns the physical address of a magic va-to-pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	RET

/*
 * Builds the page table.
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned. See the C sketch below.
 */
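/*
 * In rough C terms the code below is equivalent to this sketch
 * (illustrative only; "l1", "pa", "va" and "nsections" are made-up
 * names for the values in r0, r1, r2 and r3):
 *
 *	uint32_t *l1 = (uint32_t *)base;		// r0
 *	uint32_t attrs = L1_TYPE_S | L1_S_AP(AP_KRW);	// device entry
 *	attrs |= L1_S_C;				// normal memory only
 *	for (i = 0; i < nsections; i++)
 *		l1[(va >> L1_S_SHIFT) + i] = (pa + i * L1_S_SIZE) | attrs;
 */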
build_device_pagetables:
	ldr	r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
	b	1f
build_pagetables:
	/* Set the required page attributes. */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
1:
	orr	r1, r4

	/* Move the virtual address to the correct bit location. */
	lsr	r2, #(L1_S_SHIFT - 2)

	mov	r4, r3
2:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(L1_S_SIZE)
	adds	r4, r4, #-1
	bhi	2b

	RET

VA_TO_PA_POINTER(Lpagetable, pagetable)

Lreal_start:
	.word	_start
Lend:
	.word	_edata

.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE

.Lvirt_done:
	.word	virt_done

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We cannot place this in the bss,
 * because the bss is zeroed after this table has been built and loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE

	.text
	.align	2

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(PSR_I | PSR_F)
	msr	cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc			/* indirect calls through cpufuncs */
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine
	 * whether the MMU-disable step below is necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32-bit program & data space.
	 * Hurl ourselves into the ROM.
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	CP15_SCTLR(r0)
	mcrne	p15, 0, r2, c8, c7, 0	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware-specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals whether
	 * the v4 MMU-disable instruction needs executing: it is an illegal
	 * instruction on e.g. ARM6/7, locking up the machine in an endless
	 * illegal-instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
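/*
 * The buffer passed in r0 holds eleven words: r4-r12, sp, and lr.  setjmp
 * returns 0; longjmp restores those registers and returns 1 at the saved
 * call site (the saved lr).
 */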
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */
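	/*
	 * Offset arithmetic: an ARM instruction that reads the pc sees its
	 * own address + 8, so [pc, #12] at sigcode+8 and [pc, #8] at
	 * sigcode+16 both address the .word constants placed just past
	 * END(sigcode) below.
	 */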

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well, if that failed we'd better exit quickly! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn. */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global _C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode

/* End of locore.S */