/* locore-v6.S revision 287127 */
/*-
 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30
/*
 * assym.s is generated at build time and provides assembler-visible
 * kernel constants; the machine/* headers supply the CP15 accessors,
 * SCTLR/PSR bit names and page-table constants used below.
 */
#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/sysreg.h>
#include <machine/cpuconf.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD: head/sys/arm/arm/locore-v6.S 287127 2015-08-25 14:49:11Z zbb $");
41
#ifndef ARM_NEW_PMAP
/*
 * With the classic pmap, map the new-pmap PTE1_* names used in this file
 * onto the traditional L1 section-mapping constants from <machine/pte.h>.
 */
#define	PTE1_OFFSET	L1_S_OFFSET
#define	PTE1_SHIFT	L1_S_SHIFT
#define	PTE1_SIZE	L1_S_SIZE
#endif
47
#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#define	MSR_ELR_HYP(regnum)	msr	elr_hyp, lr
#define	ERET	eret
#else
/* Toolchain lacks the virt extension: emit the raw instruction encodings. */
#define MSR_ELR_HYP(regnum) .word (0xe12ef300 | regnum)
#define ERET .word 0xe160006e
#endif
#endif /* __ARM_ARCH >= 7 */
62
/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048

	.text
	.align	2
68
#if __ARM_ARCH >= 7
/*
 * If the CPU is currently in HYP mode, mask IRQ/FIQ/aborts in the saved
 * PSR and ERET back to the mode recorded in SPSR; otherwise fall straight
 * through.  Trashes r0 (and lr on the HYP path).
 */
#define LEAVE_HYP							\
       /* Leave HYP mode */						;\
       mrs	r0, cpsr		   				;\
       and	r0, r0, #(PSR_MODE)     /* Mode is in the low 5 bits of CPSR */	;\
       teq	r0, #(PSR_HYP32_MODE)   /* Hyp Mode? */			;\
       bne	1f							;\
       /* Ensure that IRQ, FIQ and Aborts will be disabled after eret */;\
       mrs	r0, spsr						;\
       orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
       msr	spsr, r0						;\
       /* Exit hypervisor mode */					;\
       adr	lr, 1f							;\
       MSR_ELR_HYP(14)							;\
       ERET								;\
1:
#else
#define LEAVE_HYP
#endif /* __ARM_ARCH >= 7 */
88
/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	/* Stash bootloader arguments in callee-style regs across the calls below. */
	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	/* If started in HYP mode, drop out of it (ARMv7 only; no-op otherwise). */
	LEAVE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_UNAL_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_AFLT_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU		/* Invalidate the instruction cache */
	ISB

	/*
	 * Build page table from scratch.
	 */

	/* Calculate the physical address of the startup pagetable. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * Map PA == VA
	 */
	/* Find the start kernels load address */
	adr	r5, _start
	ldr	r2, =(PTE1_OFFSET)
	bic	r5, r2			/* Round down to a section boundary */
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map used for early_printf(). */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif
	bl	init_mmu

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp,	#INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* Zero the BSS one word at a time */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
222
/*
 * Emit a two-word "magic pointer": the link-time address of the pointer
 * itself followed by the link-time address of 'table'.  Consumed by
 * translate_va_to_pa below to compute physical addresses pre-MMU.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
227
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]		/* r1 = link-time (virtual) addr of pointer */
	sub	r2, r1, r0		/* r0 is its run-time (physical) addr */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
257
/*
 * Init MMU
 * r0 - the table base address
 *
 * Programs TTBR0/ASID/DACR (plus TEX remap for ARM_NEW_PMAP), flushes
 * the TLB and turns the MMU on.  Trashes r0.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #((DOMAIN_CLIENT <<	(PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	CP15_DACR(r0)

#ifdef ARM_NEW_PMAP
	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
#endif
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0,	#CPU_CONTROL_MMU_ENABLE
	orr	r0, r0,	#CPU_CONTROL_V6_EXTPAGE
#ifdef ARM_NEW_PMAP
	orr	r0, r0,	#CPU_CONTROL_TR_ENABLE
#endif
	orr	r0, r0,	#CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	ISB
	mov	pc, lr
END(init_mmu)
304
305
/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* save table base */
	mov	r5, r1			/* save ACTLR clear mask */
	mov	r6, r2			/* save ACTLR set mask */

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* Write back only if it changed */
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	/* Flush all TLBs */
	mcr	CP15_TLBIALL
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)
367
368
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_pagetables:
	/* Set the required page attributes */
#if defined(ARM_NEW_PMAP)
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
#elif defined(SMP)
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED)
#else
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#endif
	orr	r1, r4			/* Merge attributes into the PA */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)	/* r2 = byte offset of the L1 entry */

	mov	r4, r3			/* r4 = sections remaining */
1:
	str	r1, [r0, r2]		/* Write one section entry */
	add	r2, r2, #4		/* Advance to the next table slot */
	add	r1, r1, #(PTE1_SIZE)	/* Advance to the next section of PA */
	adds	r4, r4, #-1
	bhi	1b

	mov	pc, lr

VA_TO_PA_POINTER(Lpagetable, boot_pt1)
404
405
.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
/* Per-CPU startup stacks: INIT_ARM_STACK_SIZE bytes for each of MAXCPU CPUs. */
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU
418
/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2

/* Pointer to the cpufuncs table, for pc-relative access from this file. */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
434
#if defined(SMP)

/*
 * Secondary-CPU (AP) entry point: disable caches and MMU, enable the MMU
 * with the boot page table, pick this CPU's slice of the init stack and
 * call init_secondary() with the CPU id in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	/* If started in HYP mode, drop out of it (no-op otherwise). */
	LEAVE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_UNAL_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_AFLT_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0,	#0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1,	r0		/* Point sp to initstack */
	add	sp, sp,	r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif
490
/*
 * cpu_halt: clean/invalidate the data cache, then either jump to the
 * board-specific reset routine (if cpu_reset_address is non-zero) or
 * spin in a WFI loop forever.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa			/* Mask interrupts */
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0			/* Reset hook installed? */
	movne	pc, r4			/* Yes: jump to it, never return */
1:
	WFI				/* No: idle forever */
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)
513
514
/*
 * setjmp + longjmp
 */
/*
 * int setjmp(jmp_buf env)
 *
 * Save r4-r12, sp and lr into the buffer at r0; a direct call
 * returns 0 (longjmp resumes here returning 1).
 */
ENTRY(setjmp)
	stm	r0, {r4-r14}		/* r4-r12, sp, lr -> env */
	mov	r0, #0
	RET
END(setjmp)
523
/*
 * void longjmp(jmp_buf env)
 *
 * Reload r4-r12, sp and lr from the buffer at r0 and return 1
 * to the original setjmp() caller.
 */
ENTRY(longjmp)
	ldm	r0, {r4-r14}		/* env -> r4-r12, sp, lr */
	mov	r0, #1
	RET
END(longjmp)
529
	.data
	.global	_C_LABEL(esym)
/* esym: end-of-symbol-table pointer, initialized to the kernel 'end' symbol. */
_C_LABEL(esym):	.word	_C_LABEL(end)

/* abort(): branches to itself forever; never returns. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
537
/*
 * Signal trampoline, copied out to user stacks.  Invokes sigreturn(2)
 * on the ucontext at sp + SIGF_UC; if that fails, calls exit(2) and
 * then retries sigreturn.  The pc-relative ldr offsets below reach the
 * two .word constants following END(sigcode) -- do not reorder.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	/* Literal pool read by the pc-relative loads above; keep adjacent. */
	.word	SYS_sigreturn
	.word	SYS_exit
564
	.align	2
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
/* szsigcode: byte length of the signal trampoline copied to user stacks. */
szsigcode:
	.long esigcode-sigcode

/* End of locore-v6.S */
575