/* locore-v4.S revision 314530 */
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte-v4.h>
41
42__FBSDID("$FreeBSD: stable/11/sys/arm/arm/locore-v4.S 314530 2017-03-02 01:18:46Z ian $");
43
44/* 2K initial stack is plenty, it is only used by initarm() */
45#define INIT_ARM_STACK_SIZE	2048
46
/*
 * CPWAIT_BRANCH: a branch to the immediately following instruction.
 * On ARM, writing pc-8+4... i.e. "sub pc, pc, #4" lands on the next
 * instruction (pc reads as current insn + 8), flushing the prefetch queue.
 */
47#define	CPWAIT_BRANCH							 \
48	sub	pc, pc, #4
49
/*
 * CPWAIT(tmp): wait for a preceding CP15 (coprocessor) operation to
 * complete before continuing.  tmp is trashed by the dummy CP15 read.
 */
50#define	CPWAIT(tmp)							 \
51	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
52	mov	tmp, tmp		/* wait for it to complete */	;\
53	CPWAIT_BRANCH			/* branch to next insn */
54
55/*
56 * This is for libkvm, and should be the address of the beginning
57 * of the kernel text segment (not necessarily the same as kernbase).
58 *
59 * These are being phased out. Newer copies of libkvm don't need these
60 * values as the information is added to the core file by inspecting
61 * the running kernel.
62 */
63	.text
64	.align	2
65#ifdef PHYSADDR
66.globl kernbase
67.set kernbase,KERNBASE
68.globl physaddr
69.set physaddr,PHYSADDR
70#endif
71
72/*
73 * On entry for FreeBSD boot ABI:
74 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
75 *	r1 - if (r0 == 0) then metadata pointer
76 * On entry for Linux boot ABI:
77 *	r0 - 0
78 *	r1 - machine type (passed as arg2 to initarm)
79 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
80 *
81 * For both types of boot we gather up the args, put them in a struct arm_boot_params
82 * structure and pass that to initarm.
83 */
84	.globl	btext
85btext:
/*
 * Kernel entry point.  Stashes the bootloader-supplied registers,
 * optionally relocates the kernel out of flash, builds an initial L1
 * page table, enables the MMU, zeroes the bss, then calls initarm()
 * followed by mi_startup().  Never returns.
 */
86ASENTRY_NP(_start)
87	STOP_UNWINDING		/* Can't unwind into the bootloader! */
88
	/* Preserve the four bootloader registers across everything below. */
89	mov	r9, r0		/* 0 or boot mode from boot2 */
90	mov	r8, r1		/* Save Machine type */
91	mov	ip, r2		/* Save meta data */
92	mov	fp, r3		/* Future expansion */
93
94	/* Make sure interrupts are disabled. */
95	mrs	r7, cpsr
96	orr	r7, r7, #(PSR_I | PSR_F)
97	msr	cpsr_c, r7
98
99#if defined (FLASHADDR) && defined(LOADERRAMADDR)
100/*
101 * Sanity check the configuration.
102 * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
103 * ARMv4 and ARMv5 make assumptions on where they are loaded.
104 * TODO: Fix the ARMv4/v5 case.
105 */
106#ifndef PHYSADDR
107#error PHYSADDR must be defined for this configuration
108#endif
109
110	/* Check if we're running from flash. */
111	ldr	r7, =FLASHADDR
112	/*
113	 * If we're running with MMU disabled, test against the
114	 * physical address instead.
115	 */
116	mrc	CP15_SCTLR(r2)
117	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
118	ldreq	r6, =PHYSADDR
119	ldrne	r6, =LOADERRAMADDR
120	cmp	r7, r6
121	bls 	flash_lower
122	cmp	r7, pc
123	bhi	from_ram
124	b	do_copy
125
126flash_lower:
127	cmp	r6, pc
128	bls	from_ram
129do_copy:
	/*
	 * Copy the kernel image (_start .. _edata) from flash into RAM at
	 * r6, then jump to the RAM copy via the from_ram offset below.
	 * r4 holds the RAM destination across the memcpy call.
	 */
130	ldr	r7, =KERNBASE
131	adr	r1, _start
132	ldr	r0, Lreal_start
133	ldr	r2, Lend
134	sub	r2, r2, r0
135	sub	r0, r0, r7
136	add	r0, r0, r6
137	mov	r4, r0
138	bl	memcpy
139	ldr	r0, Lram_offset
140	add	pc, r4, r0
141Lram_offset:	.word from_ram-_C_LABEL(_start)
142from_ram:
143	nop
144#endif
145
146disable_mmu:
147	/* Disable MMU for a while */
148	mrc	CP15_SCTLR(r2)
149	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
150	    CPU_CONTROL_WBUF_ENABLE)
151	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
152	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
153	mcr	CP15_SCTLR(r2)
154
155	nop
156	nop
157	nop
158	CPWAIT(r0)
159
160Lunmapped:
161	/*
162	 * Build page table from scratch.
163	 */
164
165	/*
166	 * Figure out the physical address we're loaded at by assuming this
167	 * entry point code is in the first L1 section and so if we clear the
168	 * offset bits of the pc that will give us the section-aligned load
169	 * address, which remains in r5 throughout all the following code.
170	 */
171	ldr	r2, =(L1_S_OFFSET)
172	bic	r5, pc, r2
173
174	/* Find the delta between VA and PA, result stays in r0 throughout. */
175	adr	r0, Lpagetable
176	bl	translate_va_to_pa
177
178	/*
179	 * First map the entire 4GB address space as VA=PA.  It's mapped as
180	 * normal (cached) memory because it's for things like accessing the
181	 * parameters passed in from the bootloader, which might be at any
182	 * physical address, different for every platform.
183	 */
184	mov	r1, #0
185	mov	r2, #0
186	mov	r3, #4096
187	bl	build_pagetables
188
189	/*
190	 * Next we do 64MiB starting at the physical load address, mapped to
191	 * the VA the kernel is linked for.
192	 */
193	mov	r1, r5
194	ldr	r2, =(KERNVIRTADDR)
195	mov	r3, #64
196	bl	build_pagetables
197
198	/* Create a device mapping for early_printf if specified. */
199#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
200	ldr	r1, =SOCDEV_PA
201	ldr	r2, =SOCDEV_VA
202	mov	r3, #1
203	bl	build_device_pagetables
204#endif
205
	/* r0 still holds pa(pagetable) from translate_va_to_pa above. */
206	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
207	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */
208
209	/* Set the Domain Access register.  Very important! */
210	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
211	mcr	p15, 0, r0, c3, c0, 0
212	/*
213	 * Enable MMU.
214	 */
215	mrc	CP15_SCTLR(r0)
216	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
217	mcr	CP15_SCTLR(r0)
218	nop
219	nop
220	nop
221	CPWAIT(r0)
222
223	/* Transition the PC from physical to virtual addressing. */
224	ldr	pc,=mmu_done
225
226mmu_done:
227	nop
	/*
	 * Zero the bss: .Lstart (below) holds { _edata, _ebss, initial sp }.
	 * After the ldmia: r1 = _edata, r2 = _ebss, sp = top of svcstk.
	 */
228	adr	r1, .Lstart
229	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
230	sub	r2, r2, r1		/* get zero init data */
231	mov	r3, #0
232.L1:
233	str	r3, [r1], #0x0004	/* get zero init data */
234	subs	r2, r2, #4
235	bgt	.L1
236
237virt_done:
	/*
	 * Build a struct arm_boot_params on the stack from the saved
	 * bootloader registers and pass its address to initarm().
	 * Layout matches the str offsets: size, r0, r1, r2, r3,
	 * physical load address, pagetable address — 7 words = 28 bytes.
	 */
238	mov	r1, #28			/* loader info size is 28 bytes also second arg */
239	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
240	mov	r0, sp			/* loader info pointer is first arg */
241	bic	sp, sp, #7		/* align stack to 8 bytes */
242	str	r1, [r0]		/* Store length of loader info */
243	str	r9, [r0, #4]		/* Store r0 from boot loader */
244	str	r8, [r0, #8]		/* Store r1 from boot loader */
245	str	ip, [r0, #12]		/* store r2 from boot loader */
246	str	fp, [r0, #16]		/* store r3 from boot loader */
247	str	r5, [r0, #20]		/* store the physical address */
248	adr	r4, Lpagetable		/* load the pagetable address */
249	ldr	r5, [r4, #4]
250	str	r5, [r0, #24]		/* store the pagetable address */
251	mov	fp, #0			/* trace back starts here */
252	bl	_C_LABEL(initarm)	/* Off we go */
253
254	/* init arm will return the new stack pointer. */
255	mov	sp, r0
256
257	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */
258
	/* mi_startup() must never return; panic if it does. */
259	adr	r0, .Lmainreturned
260	b	_C_LABEL(panic)
261	/* NOTREACHED */
262END(_start)
263
/*
 * VA_TO_PA_POINTER(name, table): emit a two-word descriptor at "name":
 *   word 0 = the link-time (virtual) address of the descriptor itself (".")
 *   word 1 = the link-time (virtual) address of "table"
 * Consumed by translate_va_to_pa, which compares word 0 against the
 * descriptor's run-time address to compute the VA->PA delta.
 */
264#define VA_TO_PA_POINTER(name, table)	 \
265name:					;\
266	.word	.			;\
267	.word	table
268
269/*
270 * Returns the physical address of a magic va to pa pointer.
271 * r0     - The pagetable data pointer. This must be built using the
272 *          VA_TO_PA_POINTER macro.
273 *          e.g.
274 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
275 *            ...
276 *            adr  r0, Lpagetable
277 *            bl   translate_va_to_pa
278 *            r0 will now contain the physical address of pagetable
279 * r1, r2 - Trashed
280 */
281translate_va_to_pa:
282	ldr	r1, [r0]
283	sub	r2, r1, r0
284	/* At this point: r2 = VA - PA */
285
286	/*
287	 * Find the physical address of the table. After these two
288	 * instructions:
289	 * r1 = va(pagetable)
290	 *
291	 * r0 = va(pagetable) - (VA - PA)
292	 *    = va(pagetable) - VA + PA
293	 *    = pa(pagetable)
294	 */
295	ldr	r1, [r0, #4]
296	sub	r0, r1, r2
297	RET
298
299/*
300 * Builds the page table
301 * r0 - The table base address
302 * r1 - The physical address (trashed)
303 * r2 - The virtual address (trashed)
304 * r3 - The number of 1MiB sections
305 * r4 - Trashed
306 *
307 * Addresses must be 1MiB aligned
308 */
309build_device_pagetables:
310	ldr	r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
311	b	1f
312build_pagetables:
313	/* Set the required page attributed */
314	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
3151:
316	orr	r1, r4
317
318	/* Move the virtual address to the correct bit location */
319	lsr	r2, #(L1_S_SHIFT - 2)
320
321	mov	r4, r3
3222:
323	str	r1, [r0, r2]
324	add	r2, r2, #4
325	add	r1, r1, #(L1_S_SIZE)
326	adds	r4, r4, #-1
327	bhi	2b
328
329	RET
330
/* VA/PA descriptor for the initial page table (see VA_TO_PA_POINTER). */
331VA_TO_PA_POINTER(Lpagetable, pagetable)
332
/* Link-time addresses used by the flash-relocation copy in _start. */
333Lreal_start:
334	.word	_start
335Lend:
336	.word	_edata
337
/* Loaded by the ldmia in mmu_done: bss start, bss end, initial sp. */
338.Lstart:
339	.word	_edata
340	.word	_ebss
341	.word	svcstk + INIT_ARM_STACK_SIZE
342
343.Lvirt_done:
344	.word	virt_done
345
/* Panic string used if mi_startup() ever returns. */
346.Lmainreturned:
347	.asciz	"main() returned"
348	.align	2
349
350	.bss
/* Initial (SVC-mode) stack; only used until initarm() provides a real one. */
351svcstk:
352	.space	INIT_ARM_STACK_SIZE
353
354/*
355 * Memory for the initial pagetable. We are unable to place this in
356 * the bss as this will be cleared after the table is loaded.
357 */
358	.section ".init_pagetable"
359	.align	14 /* 16KiB aligned */
360pagetable:
361	.space	L1_TABLE_SIZE
362
363	.text
364	.align	2
365
/* Literal: address of the cpufuncs dispatch table (cache ops used below). */
366.Lcpufuncs:
367	.word	_C_LABEL(cpufuncs)
368
/*
 * cpu_halt: mask interrupts, write back and invalidate the caches,
 * optionally disable the MMU (v4-style), and jump to cpu_reset_address.
 * Does not return.
 */
369ENTRY_NP(cpu_halt)
	/* Force SVC32 mode with IRQ/FIQ masked. */
370	mrs	r2, cpsr
371	bic	r2, r2, #(PSR_MODE)
372	orr	r2, r2, #(PSR_SVC32_MODE)
373	orr	r2, r2, #(PSR_I | PSR_F)
374	msr	cpsr_fsxc, r2
375
376	ldr	r4, .Lcpu_reset_address
377	ldr	r4, [r4]
378
	/* Indirect calls through the cpufuncs table: wbinv I/D and L2. */
379	ldr	r0, .Lcpufuncs
380	mov	lr, pc
381	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
382	mov	lr, pc
383	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
384
385	/*
386	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
387	 * necessary.
388	 */
389
390	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
391	ldr	r1, [r1]
392	cmp	r1, #0
393	mov	r2, #0
394
395	/*
396	 * MMU & IDC off, 32 bit program & data space
397	 * Hurl ourselves into the ROM
398	 */
	/*
	 * NOTE: the condition for mcrne comes from the "cmp r1, #0" above;
	 * the intervening mov/mcr instructions do not alter the flags.
	 */
399	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
400	mcr	CP15_SCTLR(r0)
401	mcrne	p15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
402	mov	pc, r4
403
404	/*
405	 * _cpu_reset_address contains the address to branch to, to complete
406	 * the cpu reset after turning the MMU off
407	 * This variable is provided by the hardware specific code
408	 */
409.Lcpu_reset_address:
410	.word	_C_LABEL(cpu_reset_address)
411
412	/*
413	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
414	 * v4 MMU disable instruction needs executing... it is an illegal instruction
415	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
416	 * instruction / data-abort / reset loop.
417	 */
418.Lcpu_reset_needs_v4_MMU_disable:
419	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
420END(cpu_halt)
421
422
423/*
424 * setjump + longjmp
425 */
/*
 * setjmp(buf): save r4-r14 (callee-saved regs plus sp and lr) into the
 * buffer at r0 and return 0.  longjmp restores them and returns 1.
 */
426ENTRY(setjmp)
427	stmia	r0, {r4-r14}
428	mov	r0, #0x00000000
429	RET
430END(setjmp)
431
/*
 * longjmp(buf): restore r4-r14 saved by setjmp from the buffer at r0 and
 * return 1 (so the matching setjmp call site sees a non-zero result).
 */
432ENTRY(longjmp)
433	ldmia	r0, {r4-r14}
434	mov	r0, #0x00000001
435	RET
436END(longjmp)
437
438	.data
439	.global	_C_LABEL(esym)
/* esym: end-of-symbols pointer; initialized to the kernel's "end" symbol. */
440_C_LABEL(esym):	.word	_C_LABEL(end)
441
/* abort: deliberate infinite loop (branches to its own entry point). */
442ENTRY_NP(abort)
443	b	_C_LABEL(abort)
444END(abort)
445
/*
 * sigcode: signal trampoline copied onto the user stack.  Points r0 at
 * the ucontext (sp + SIGF_UC) and invokes SYS_sigreturn; falls back to
 * SYS_exit, then loops.  The syscall numbers live in the two .word slots
 * just past END(sigcode), reached via pc-relative loads (in ARM state
 * pc reads as the current instruction's address + 8, so #12/#8 land on
 * those words).  Do not change the code size without fixing the offsets.
 */
446ENTRY_NP(sigcode)
447	mov	r0, sp
448	add	r0, r0, #SIGF_UC
449
450	/*
451	 * Call the sigreturn system call.
452	 *
453	 * We have to load r7 manually rather than using
454	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
455	 * correct. Using the alternative places esigcode at the address
456	 * of the data rather than the address one past the data.
457	 */
458
459	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
460	swi	SYS_sigreturn
461
462	/* Well if that failed we better exit quick ! */
463
464	ldr	r7, [pc, #8]	/* Load SYS_exit */
465	swi	SYS_exit
466
467	/* Branch back to retry SYS_sigreturn */
468	b	. - 16
469END(sigcode)
470	.word	SYS_sigreturn
471	.word	SYS_exit
472
473	.align	2
	/* esigcode marks one past the end of the trampoline + data words. */
474	.global _C_LABEL(esigcode)
475		_C_LABEL(esigcode):
476
477	.data
478	.global szsigcode
/* szsigcode: byte length of the sigcode trampoline to copy to user stacks. */
479szsigcode:
480	.long esigcode-sigcode
481
482/* End of locore.S */
483