/* locore-v6.S revision 321049 */
/*-
 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/sysreg.h>
#include <machine/pte-v6.h>

__FBSDID("$FreeBSD: stable/11/sys/arm/arm/locore-v6.S 321049 2017-07-16 17:04:56Z emaste $");


#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#define	MSR_ELR_HYP(regnum)	msr	elr_hyp, lr
#define	ERET	eret
#else
/* Toolchain lacks virtualization-extension mnemonics; emit raw opcodes. */
#define MSR_ELR_HYP(regnum) .word (0xe12ef300 | regnum)
#define ERET .word 0xe160006e
#endif
#endif /* __ARM_ARCH >= 7 */

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048

60	.text
61	.align	2
62
63#if __ARM_ARCH >= 7
64#define	LEAVE_HYP							\
65	/* Leave HYP mode */						;\
66	mrs	r0, cpsr						;\
67	and	r0, r0, #(PSR_MODE)   /* Mode is in the low 5 bits of CPSR */ ;\
68	teq	r0, #(PSR_HYP32_MODE) /* Hyp Mode? */			;\
69	bne	1f							;\
70	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
71	mrs	r0, cpsr						;\
72	bic	r0, r0, #(PSR_MODE)					;\
73	orr	r0, r0, #(PSR_SVC32_MODE)				;\
74	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
75	msr	spsr_cxsf, r0						;\
76	/* Exit hypervisor mode */					;\
77	adr	lr, 1f							;\
78	MSR_ELR_HYP(14)							;\
79	ERET								;\
801:
81#else
82#define	LEAVE_HYP
83#endif /* __ARM_ARCH >= 7 */
84
/*
 * Kernel entry point.
 *
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	LEAVE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_AFLT_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096
	bl	build_pagetables

	/*
	 * Next we do 64MiB starting at the physical load address, mapped to
	 * the VA the kernel is linked for.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	mov	r3, #64
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif
	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp,	#INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* zero the bss, word at a time */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)

/*
 * Emit a two-word "magic" pointer: the link-time address of the pointer
 * itself, followed by the link-time address of `table'.  Consumed by
 * translate_va_to_pa below to recover physical addresses before the MMU
 * is turned on.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]	/* r1 = link-time (virtual) address of the pointer */
	sub	r2, r1, r0	/* r0 is its run-time (physical) address */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr

/*
 * Init MMU
 * r0 - the table base address
 *
 * Programs TTBR0/DACR/TEX-remap, invalidates the TLB, then enables the
 * MMU in SCTLR.  Trashes r0.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0,	#CPU_CONTROL_MMU_ENABLE
	orr	r0, r0,	#CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0,	#CPU_CONTROL_TR_ENABLE
	orr	r0, r0,	#CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)


/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* Write only if the value changed */
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)


/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 *
 * Note: both entry points currently use the same attributes because the
 * boot-time TEX remap (see init_mmu) makes every class uncacheable.
 */
build_device_pagetables:
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
	b	1f
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)

	mov	r4, r3
2:
	str	r1, [r0, r2]		/* Write one 1MiB section entry, */
	add	r2, r2, #4		/* then advance table slot and */
	add	r1, r1, #(PTE1_SIZE)	/* physical address. */
	adds	r4, r4, #-1
	bhi	2b

	mov	pc, lr

VA_TO_PA_POINTER(Lpagetable, boot_pt1)

.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2

#if defined(SMP)

/*
 * Entry point for secondary (application) processors.  Disables caches
 * and the MMU, switches to the boot page table built by the BSP, then
 * calls init_secondary() with the CPU id in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	LEAVE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_AFLT_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0,	#0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1,	r0		/* Point sp to initstack */
	add	sp, sp,	r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif

/*
 * Halt the CPU: clean the caches, then either jump to the platform's
 * cpu_reset_address (if set) or spin in WFI forever.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4
1:
	WFI
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)


/*
 * setjump + longjmp
 */
/* Save r4-r14 into the buffer at r0; return 0. */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

/* Restore r4-r14 from the buffer at r0; "return" 1 from setjmp. */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

538	.data
539	.global	_C_LABEL(esym)
540_C_LABEL(esym):	.word	_C_LABEL(end)
541
/* abort(): branches to itself — deliberate infinite loop stub. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

/*
 * Signal trampoline, copied onto the user stack.  The two .word values
 * after END(sigcode) are read with pc-relative loads and must stay
 * immediately after the code, in this order.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode

/* End of locore.S */