/* locore-v4.S revision 300533 */
/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpuconf.h>
#include <machine/pte-v4.h>

__FBSDID("$FreeBSD: head/sys/arm/arm/locore-v4.S 300533 2016-05-23 20:07:17Z ian $");

/* 2K initial stack is plenty, it is only used by initarm() */
#define INIT_ARM_STACK_SIZE	2048

/*
 * CPWAIT: classic ARMv4/v5 (XScale-era) sequence to drain a CP15 write.
 * The read-back forces the preceding coprocessor write to complete, the
 * dependent mov stalls until that read retires, and the pc-writing branch
 * flushes prefetched instructions so the new CP15 state governs whatever
 * executes next.  sub pc, pc, #4 lands on the next instruction because
 * pc reads as '.'+8 on ARM.
 */
#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */

/*
 * This is for libkvm, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 *
 * These are being phased out. Newer copies of libkvm don't need these
 * values as the information is added to the core file by inspecting
 * the running kernel.
 */
	.text
	.align	2
#ifdef PHYSADDR
/* Absolute symbols exported for libkvm; values come from kernel config. */
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR
#endif

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 *
 * Register usage through this function:
 *	r9, r8, ip, fp - saved bootloader arguments r0-r3, stored into the
 *	                 arm_boot_params structure at virt_done below
 *	r5 - section-aligned physical load address (set at Lunmapped)
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(PSR_I | PSR_F)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
/*
 * Sanity check the configuration.
 * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
 * ARMv4 and ARMv5 make assumptions on where they are loaded.
 * TODO: Fix the ARMv4/v5 case.
 */
#ifndef PHYSADDR
#error PHYSADDR must be defined for this configuration
#endif

	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	CP15_SCTLR(r2)
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls 	flash_lower
	cmp	r7, pc		/* flash above RAM: pc below FLASHADDR => in RAM */
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc		/* flash below RAM: pc at/above RAM base => in RAM */
	bls	from_ram
do_copy:
	/*
	 * Executing from flash: copy the kernel image into RAM, then jump
	 * to the copy.  The memcpy(dst, src, len) arguments are:
	 *   dst (r0) = VA(_start) - KERNBASE + RAM base (r6)
	 *   src (r1) = where we are currently executing
	 *   len (r2) = _edata - _start
	 */
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0		/* preserve dst across the memcpy call */
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0	/* jump to from_ram inside the RAM copy */
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif

disable_mmu:
	/* Disable MMU for a while */
	mrc	CP15_SCTLR(r2)
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	CP15_SCTLR(r2)

	nop
	nop
	nop
	CPWAIT(r0)

Lunmapped:
	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096	/* 4096 x 1MiB sections == all of 4GiB */
	bl	build_pagetables

	/*
	 * Next we do 64MiB starting at the physical load address, mapped to
	 * the VA the kernel is linked for.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	mov	r3, #64
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif

	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable MMU.
	 */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	CP15_SCTLR(r0)
	nop
	nop
	nop
	CPWAIT(r0)

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc,=mmu_done

mmu_done:
	nop
	/* Zero the BSS (_edata.._ebss) and set up the initial stack. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* r1 = _edata, r2 = _ebss, sp = stack top */
	sub	r2, r2, r1		/* r2 = number of bytes to zero */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero one word and advance */
	subs	r2, r2, #4
	bgt	.L1

virt_done:
	/* Build a struct arm_boot_params on the stack and call initarm(). */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]		/* second word = VA of the pagetable */
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	/* mi_startup() must never return; panic if it does. */
	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

/*
 * Emit a two-word descriptor consumed by translate_va_to_pa: the first
 * word ('.') is the link-time (virtual) address of the descriptor itself,
 * the second is the link-time address of 'table'.  Comparing the first
 * word with the descriptor's run-time address yields the VA->PA delta.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 *
 * Works with the MMU off: 'adr' gave the caller the run-time (physical)
 * address of the descriptor, while its first word holds the link-time VA.
 */
translate_va_to_pa:
	ldr	r1, [r0]	/* r1 = link-time VA of the descriptor */
	sub	r2, r1, r0	/* r0 = run-time PA of the descriptor */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	RET

/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 *
 * build_device_pagetables uses the same fill loop but omits the L1_S_C
 * attribute, so those sections are not marked cacheable (for device
 * register mappings).
 */
build_device_pagetables:
	ldr	r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
	b	1f
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
1:
	orr	r1, r4		/* r1 = first section descriptor: PA | attrs */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(L1_S_SHIFT - 2)	/* r2 = (VA >> 20) * 4 = L1 table byte offset */

	mov	r4, r3
2:
	str	r1, [r0, r2]		/* store one section descriptor */
	add	r2, r2, #4		/* advance to next L1 entry */
	add	r1, r1, #(L1_S_SIZE)	/* advance PA by one 1MiB section */
	adds	r4, r4, #-1
	bhi	2b			/* loop while count remains (unsigned > 0) */

	RET

/* Magic va->pa descriptor for the initial L1 page table. */
VA_TO_PA_POINTER(Lpagetable, pagetable)

/* Link-time addresses used by the copy-from-flash code in _start. */
Lreal_start:
	.word	_start
Lend:
	.word	_edata

/* Parameters for the BSS clear and stack setup at mmu_done. */
.Lstart:
	.word	_edata			/* first byte to zero */
	.word	_ebss			/* end of the region to zero */
	.word	svcstk + INIT_ARM_STACK_SIZE	/* initial stack top */

.Lvirt_done:
	.word	virt_done

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE	/* initial stack, used only up to initarm() */

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE

	.text
	.align	2

/* Pointer to the cpufuncs table used for the indirect cache calls below. */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

/*
 * void cpu_halt(void)
 *
 * Switch to SVC32 mode with IRQ/FIQ masked, write back and invalidate the
 * caches via the cpufuncs table, then jump to the board-supplied
 * cpu_reset_address to complete the reset.  Does not return.
 */
ENTRY_NP(cpu_halt)
	mrs	r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr	r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(PSR_I | PSR_F)
	msr	cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]		/* r4 = address to jump to for reset */

	/*
	 * Indirect calls through the cpufuncs table: mov lr, pc sets the
	 * return address to the instruction after the ldr (pc reads '.'+8).
	 */
	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0		/* condition flags consumed by mcrne below */
	mov	r2, #0		/* mov does not disturb the flags */

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr	CP15_SCTLR(r0)
	mcrne	p15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov	pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal instruction
	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
	 * instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjump + longjmp
 */

/*
 * int setjmp(jmp_buf env)
 *
 * Save r4-r14 (callee-saved registers plus sp and lr) into the buffer
 * at r0 and return 0.  A later longjmp() on the same buffer resumes
 * after this call with a return value of 1.
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

/*
 * void longjmp(jmp_buf env)
 *
 * Restore r4-r14 saved by setjmp(); since r14 (lr) is reloaded, RET
 * resumes after the original setjmp() call, which appears to return 1.
 */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
/* esym: initialized to the linker-provided 'end' symbol of the kernel. */
_C_LABEL(esym):	.word	_C_LABEL(end)

/* abort(): spin forever by branching to itself. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

/*
 * Signal trampoline.  szsigcode below measures this code (presumably so
 * it can be copied out of the kernel — hence the position-independent
 * PC-relative literal loads; TODO confirm against the MD signal code).
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = pointer to the ucontext */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn (pc reads '.'+8) */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
/* Size in bytes of the sigcode trampoline above, including its literals. */
szsigcode:
	.long esigcode-sigcode

/* End of locore.S */
