/*-
 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/sysreg.h>
#include <machine/cpuconf.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD: stable/10/sys/arm/arm/locore-v6.S 278702 2015-02-13 20:23:06Z ian $");

#ifndef	ARM_NEW_PMAP
#define	PTE1_OFFSET	L1_S_OFFSET
#define	PTE1_SHIFT	L1_S_SHIFT
#define	PTE1_SIZE	L1_S_SIZE
#endif
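
/*
 * With the old pmap, the PTE1_* names above are simply aliases for the
 * classic L1 1 MiB section constants, so the table-building code below
 * works unchanged with either pmap implementation.
 */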

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048

	.text
	.align	2

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both boot ABIs we gather up the args, put them in a struct
 * arm_boot_params and pass that to initarm.
 */
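/*
 * For illustration only: the 28-byte block assembled on the stack below and
 * handed to initarm() corresponds to struct arm_boot_params.  The field
 * names here are my reading of that struct and may not match it exactly;
 * the offsets are the ones actually used by the str instructions below.
 *
 *	struct arm_boot_params {
 *		register_t abp_size;		// offset  0: 28, size of block
 *		register_t abp_r0;		// offset  4: r0 from boot loader
 *		register_t abp_r1;		// offset  8: r1 from boot loader
 *		register_t abp_r2;		// offset 12: r2 from boot loader
 *		register_t abp_r3;		// offset 16: r3 from boot loader
 *		register_t abp_physaddr;	// offset 20: kernel load address
 *		register_t abp_pagetable;	// offset 24: boot page table
 *	};
 */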
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save machine type */
	mov	r10, r2		/* Save metadata */
	mov	r11, r3		/* Future expansion */

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the MMU.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_UNAL_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_AFLT_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU
	ISB

	/*
	 * Build page table from scratch.
	 */

	/* Calculate the physical address of the startup pagetable. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * Map PA == VA
	 */
	/* Find the kernel's load address, rounded down to a 1 MiB boundary. */
	adr	r5, _start
	ldr	r2, =(PTE1_OFFSET)
	bic	r5, r2
	mov	r1, r5
	mov	r2, r5
	/* Map 64 MiB (the count in r3 is preserved across calls to build_pagetables). */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to. */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map used for early_printf(). */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif
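	/*
	 * Note on the maps built above: the PA == VA (identity) map keeps
	 * instruction fetch working at the instant the MMU is switched on,
	 * while the KERNVIRTADDR map covers the same physical range at the
	 * kernel's linked virtual addresses so we can jump to them right
	 * after.  The optional SOCDEV map only exists so early_printf() can
	 * reach the console device before the real mappings are set up.
	 */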
	bl	init_mmu

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:

	/* Set up stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp, #INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get size of zero-init data (bss), */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* then clear it word by word. */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes; also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* Store r2 from boot loader */
	str	r11, [r0, #16]		/* Store r3 from boot loader */
	str	r5, [r0, #20]		/* Store the kernel's physical load address */
	adr	r4, Lpagetable		/* Load the pagetable address... */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* ...and store it as well. */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
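
/*
 * How the pointer pair above works: ".word ." assembles to the link-time
 * (virtual) address of that word itself, while "adr" at run time yields
 * the address the word actually sits at (physical, while the MMU is still
 * off).  The difference between the two is exactly the VA - PA delta that
 * translate_va_to_pa below applies to the second word.
 */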

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
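
/*
 * Worked example with made-up numbers: suppose Lpagetable was linked at
 * VA 0xc0001000 but the image currently runs at PA 0x10001000.  Then
 * "adr r0, Lpagetable" yields 0x10001000, the first word holds 0xc0001000,
 * and r2 = 0xc0001000 - 0x10001000 = 0xb0000000 (VA - PA).  If the second
 * word (the linked address of boot_pt1) is 0xc0200000, the routine returns
 * 0xc0200000 - 0xb0000000 = 0x10200000, its physical address.
 */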

/*
 * Init MMU
 * r0 - The table base address
 */

ASENTRY_NP(init_mmu)

	/* Set up TLB and MMU registers. */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access Control register. */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	CP15_DACR(r0)
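	/*
	 * Both domain 0 and the kernel's pmap domain get "client" access,
	 * which means the access-permission bits in the page table entries
	 * are enforced rather than bypassed.
	 */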

#ifdef ARM_NEW_PMAP
	/*
	 * Set TEX remap registers
	 *  - Everything is remapped to uncacheable memory for now.
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
#endif
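	/*
	 * My reading of the values above: 0xAAAAA places 0b10 ("normal
	 * memory") in each PRRR type field, and NMRR == 0 marks the inner
	 * and outer cacheability of every class as non-cacheable, so all
	 * attribute combinations resolve to normal, uncached memory until
	 * the pmap programs the real attributes later.
	 */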
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #CPU_CONTROL_MMU_ENABLE
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#ifdef ARM_NEW_PMAP
	orr	r0, r0, #CPU_CONTROL_TR_ENABLE
#endif
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush branch predictor */
	ISB
	mov	pc, lr
END(init_mmu)


/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with caches disabled.
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2

	/* !! Be very paranoid here !! */
	/* !! We cannot write a single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	ISB

	/* Set auxiliary control register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor	r8, r8, r6		/* Set bits */
	teq	r7, r8
	mcrne	CP15_ACTLR(r8)
	ISB
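	/*
	 * Read-modify-write of ACTLR: the bits in r5 are cleared first and
	 * the bits in r6 are then set (the eor works as a set here because
	 * those bits were just cleared).  The write is skipped when nothing
	 * changes; on some parts ACTLR may not be writable from the current
	 * security state, so a needless write is best avoided.
	 */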

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	/* Flush all TLBs */
	mcr	CP15_TLBIALL
	DSB
	ISB

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)


/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_pagetables:
	/* Set the required page attributes. */
#if defined(ARM_NEW_PMAP)
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
#elif defined(SMP)
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED)
#else
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#endif
	orr	r1, r4

	/* Move the virtual address to the correct bit location. */
	lsr	r2, #(PTE1_SHIFT - 2)

	mov	r4, r3
1:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(PTE1_SIZE)
	adds	r4, r4, #-1
	bhi	1b

	mov	pc, lr
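
/*
 * Worked example (constants assumed for illustration): with 1 MiB sections
 * PTE1_SHIFT is 20, so "lsr r2, #(PTE1_SHIFT - 2)" turns the virtual
 * address into a byte offset of (VA >> 20) * 4 into the table.  Mapping
 * 64 MiB at VA 0xc0000000 therefore writes 64 descriptors at table offsets
 * 0x3000-0x30fc, each descriptor pointing PTE1_SIZE (1 MiB) further along
 * in physical address space.
 */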

VA_TO_PA_POINTER(Lpagetable, boot_pt1)


.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss because the bss is cleared after the table is already in use.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE
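/*
 * If I read the constants right, L1_TABLE_SIZE is 16 KiB: 4096 four-byte
 * section descriptors, one per 1 MiB of the 4 GiB address space, which is
 * also why the table is 16 KiB aligned (the .align 14 above).
 */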

	.text
	.align	2

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)

ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	/* Set up the core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_UNAL_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_AFLT_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	ISB

	/* Invalidate L1 I+D caches. */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	ISB

	/* Find the delta between VA and PA. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0, #0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1, r0		/* Point sp to initstack */
	add	sp, sp, r2		/* area for this processor. */
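	/*
	 * Each AP carves its own slice out of svcstk: sp ends up at
	 * svcstk + cpuid * INIT_ARM_STACK_SIZE and the stack grows downward
	 * from there.  For example, the CPU with id 2 starts 4 KiB into the
	 * array given the 2 KiB stack size defined above.
	 */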

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif

ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4
1:
	WFI
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off.  This variable is
	 * provided by the hardware-specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well, if that failed we'd better exit quickly! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn. */
	b	. - 16
END(sigcode)

	.word	SYS_sigreturn
	.word	SYS_exit
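
/*
 * Offset arithmetic for the loads above: in ARM state, pc reads as the
 * address of the current instruction plus 8.  The first "ldr r7, [pc, #12]"
 * sits at offset 0 of sigcode, so it fetches the word at offset 20, which
 * is the SYS_sigreturn value just above this comment; "ldr r7, [pc, #8]"
 * at offset 8 fetches SYS_exit at offset 24.  The "b . - 16" at offset 16
 * branches back to offset 0 to retry sigreturn.
 */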

	.align	2
	.global	_C_LABEL(esigcode)
_C_LABEL(esigcode):

	.data
	.global	szsigcode
szsigcode:
	.long	esigcode-sigcode

/* End of locore-v6.S */
537