locore-v6.S revision 276519
1/*-
2 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
4 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
5 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
6 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include "assym.s"
32#include <sys/syscall.h>
33#include <machine/asm.h>
34#include <machine/asmacros.h>
35#include <machine/armreg.h>
36#include <machine/sysreg.h>
37#include <machine/cpuconf.h>
38#include <machine/pte.h>
39
40__FBSDID("$FreeBSD: head/sys/arm/arm/locore-v6.S 276519 2015-01-01 23:18:37Z ian $");
41
#ifndef	ARM_NEW_PMAP
/*
 * With the classic pmap, map the new-pmap PTE1 names onto the old
 * L1 section constants so the table-building code below works with
 * either pmap implementation.
 */
#define	PTE1_OFFSET	L1_S_OFFSET
#define	PTE1_SHIFT	L1_S_SHIFT
#define	PTE1_SIZE	L1_S_SIZE
#endif

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048
50
	.text
	.align	0

/*
 * Kernel entry point (runs with MMU and caches in unknown state).
 *
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct
 * arm_boot_params structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts (and async aborts) are disabled. */
	cpsid	ifa

	/* Stash bootloader-supplied registers in callee-saved regs. */
	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 * (r7 still holds the SCTLR value read above.)
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_UNAL_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_AFLT_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU		/* Invalidate entire instruction cache */
	ISB

	/*
	 * Build page table from scratch.
	 */

	/* Calculate the physical address of the startup pagetable. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * Map PA == VA (identity map) so execution can continue across the
	 * moment the MMU is enabled in init_mmu below.
	 */
	/* Find the start kernels load address */
	adr	r5, _start
	ldr	r2, =(PTE1_OFFSET)
	bic	r5, r2			/* r5 = load address, section-aligned down */
	mov	r1, r5			/* physical base */
	mov	r2, r5			/* virtual base == physical base */
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to (same physical range at KERNVIRTADDR) */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map used for early_printf(). */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif
	bl	init_mmu

	/* Switch to virtual addresses (load of literal forces VA of label 1f). */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* r1 = _edata, r2 = _ebss, sp = svcstk */
	add	sp, sp, #INIT_ARM_STACK_SIZE	/* sp = top of CPU0 init stack */
	sub	r2, r2, r1		/* r2 = number of bytes to zero */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* zero one word of bss */
	subs	r2, r2, #4
	bgt	2b

	/* Build a struct arm_boot_params on the stack and call initarm(). */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical (load) address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]		/* r5 = virtual address of boot_pt1 */
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	/* mi_startup() must never return; panic if it does. */
	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
185
/*
 * Emit a pair of words used by translate_va_to_pa: the first is the
 * link-time (virtual) address of the pair itself ('.'), the second is
 * the virtual address of 'table'.  Comparing the first word against the
 * run-time address of 'name' yields the VA-to-PA delta.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
190
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 *
 * Works because 'adr' produces the run-time (physical, MMU off)
 * address, while the stored '.word .' holds the link-time (virtual)
 * address of the same location.
 */
translate_va_to_pa:
	ldr	r1, [r0]	/* r1 = link-time VA of the pointer pair */
	sub	r2, r1, r0	/* r0 = its run-time PA (from adr) */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
220
/*
 * Init MMU
 * r0 - The table base (physical) address
 *
 * Programs TTBR0, ASID and the domain access register, optionally the
 * TEX remap registers (new pmap), invalidates the TLB and then enables
 * the MMU.  Returns with the MMU on; the caller must have an identity
 * mapping covering this code in the supplied table.
 * Trashes r0.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	CP15_DACR(r0)

#ifdef ARM_NEW_PMAP
	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 *
	 * Note: this must be a write (mcr).  The previous "mrc" here read
	 * PRRR into r0, discarding the value just loaded, so PRRR was
	 * never programmed even though TEX remap gets enabled below.
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
#endif
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0, #CPU_CONTROL_MMU_ENABLE
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
#ifdef ARM_NEW_PMAP
	orr	r0, r0, #CPU_CONTROL_TR_ENABLE	/* TEX remap on */
#endif
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	ISB
	mov	pc, lr
END(init_mmu)
267
268
/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* r4 = new TTB */
	mov	r5, r1			/* r5 = ACTLR bits to clear */
	mov	r6, r2			/* r6 = ACTLR bits to set */

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU		/* Invalidate instruction cache */
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* write only if the value changed */
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	/* Flush all TLBs */
	mcr	CP15_TLBIALL
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)
330
331
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_pagetables:
	/* Set the required page attributes */
#if defined(ARM_NEW_PMAP)
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
#elif defined(SMP)
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED)
#else
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#endif
	orr	r1, r4			/* r1 = PA | section attributes */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)	/* r2 = byte offset of the L1 entry */

	mov	r4, r3			/* r4 = sections remaining */
1:
	str	r1, [r0, r2]		/* store one section entry */
	add	r2, r2, #4		/* advance to next L1 slot */
	add	r1, r1, #(PTE1_SIZE)	/* advance PA by one section */
	adds	r4, r4, #-1		/* carry clear only when r4 was 0 */
	bhi	1b

	mov	pc, lr
365
/* VA->PA translation anchor for the boot page table (see macro above). */
VA_TO_PA_POINTER(Lpagetable, boot_pt1)


/* Values loaded together by _start/mpentry via ldmia: bss limits and stack base. */
.Lstart:
	.word	_edata			/* start of bss */
	.word	_ebss			/* end of bss */
	.word	svcstk			/* base of the init stacks */

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
	/* One INIT_ARM_STACK_SIZE-sized init stack per cpu. */
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	0

	/* Pointer to the C-level cpufuncs table (used by cpu ops). */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
397
#if defined(SMP)

/*
 * Secondary (AP) processor entry point.  Runs with MMU and caches off.
 * Disables caches, enables the MMU via the boot page table, selects a
 * per-cpu init stack indexed by the MPIDR cpu id, and calls
 * init_secondary().
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_UNAL_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_AFLT_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	/* Pick this core's init stack: svcstk + cpuid * INIT_ARM_STACK_SIZE. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* sp = svcstk (r1/r2 unused here) */
	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, r0, #15		/* r0 = cpu id (low 4 bits) */
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1, r0
	add	sp, sp, r2
	/* NOTE(review): stores INIT_ARM_STACK_SIZE at the stack base; purpose
	 * not evident from this file — verify against init_secondary(). */
	str	r1, [sp]

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	/* init_secondary() must never return; panic if it does. */
	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif
452
/*
 * cpu_halt(): clean the caches, then either jump to the platform's
 * registered reset routine (cpu_reset_address, if non-zero) or spin
 * forever in low-power wait.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa			/* no interrupts from here on */
	bl	dcache_wbinv_poc_all	/* clean+invalidate to PoC first */

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4			/* tail-jump to platform reset code */
1:
	WFI				/* wait for interrupt; loop on wakeup */
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)
475
476
/*
 * setjmp + longjmp
 */
ENTRY(setjmp)
	/* Save r4-r12, sp and lr into the jmp_buf at r0. */
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000		/* direct invocation returns 0 */
	RET
END(setjmp)
485
ENTRY(longjmp)
	/* Restore r4-r12, sp and lr saved by setjmp; return 1 at its call site. */
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)
491
	.data
	/* esym: initialized to the linker-provided 'end' symbol. */
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)
495
/* abort(): branch to itself forever (never returns). */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
499
/*
 * Signal trampoline, copied out to user stacks.  Entered after the
 * handler returns; sp points at the signal frame.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = ucontext within the frame */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 *
	 * NOTE: the pc-relative offsets below reach the .word constants
	 * following END(sigcode); do not add or remove instructions here
	 * without recomputing them.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)

	/* Syscall numbers loaded pc-relative by the trampoline above. */
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global	_C_LABEL(esigcode)
		_C_LABEL(esigcode):
531
	.data
	/* szsigcode: byte length of the sigcode trampoline above. */
	.global	szsigcode
szsigcode:
	.long esigcode-sigcode
536
537/* End of locore.S */
538