1/*-
2 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
4 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
5 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
6 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include "assym.s"
32#include <sys/syscall.h>
33#include <machine/asm.h>
34#include <machine/asmacros.h>
35#include <machine/armreg.h>
36#include <machine/sysreg.h>
37#include <machine/pte-v6.h>
38
39__FBSDID("$FreeBSD: stable/11/sys/arm/arm/locore-v6.S 327667 2018-01-07 02:57:35Z ian $");
40
/* We map 64MB of kernel unless overridden in assym.s by the kernel option. */
#ifndef LOCORE_MAP_MB
#define	LOCORE_MAP_MB	64
#endif

#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#define	MSR_ELR_HYP(regnum)	msr	elr_hyp, lr
#define	ERET	eret
#else
/*
 * Toolchain doesn't know the virtualization-extension mnemonics, so emit
 * the raw instruction encodings for "msr elr_hyp, <regnum>" and "eret".
 */
#define MSR_ELR_HYP(regnum) .word (0xe12ef300 | regnum)
#define ERET .word 0xe160006e
#endif
#endif /* __ARM_ARCH >= 7 */

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048
63
	.text
	.align	2

#if __ARM_ARCH >= 7
/*
 * If the CPU was entered in HYP (virtualization) mode, drop back down to
 * SVC32 mode with IRQ/FIQ/aborts masked; no-op if already in another mode.
 * Clobbers r0 and lr.
 */
#define	LEAVE_HYP							\
	/* Leave HYP mode */						;\
	mrs	r0, cpsr						;\
	and	r0, r0, #(PSR_MODE)   /* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE) /* Hyp Mode? */			;\
	bne	1f							;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr						;\
	bic	r0, r0, #(PSR_MODE)					;\
	orr	r0, r0, #(PSR_SVC32_MODE)				;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
	msr	spsr_cxsf, r0						;\
	/* Exit hypervisor mode */					;\
	adr	lr, 1f							;\
	MSR_ELR_HYP(14)							;\
	ERET								;\
1:
#else
#define	LEAVE_HYP
#endif /* __ARM_ARCH >= 7 */
88
/*
 * Kernel entry point (btext aliases the same address).
 *
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	/* Stash bootloader args in high regs until they go into the struct. */
	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	/* Drop from HYP to SVC32 mode if the bootloader entered us in HYP. */
	LEAVE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_AFLT_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_UNAL_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU		/* Invalidate entire I-cache. */
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/*
	 * Figure out the physical address we're loaded at by assuming this
	 * entry point code is in the first L1 section and so if we clear the
	 * offset bits of the pc that will give us the section-aligned load
	 * address, which remains in r5 throughout all the following code.
	 */
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, pc, r2

	/* Find the delta between VA and PA, result stays in r0 throughout. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/*
	 * First map the entire 4GB address space as VA=PA.  It's mapped as
	 * normal (cached) memory because it's for things like accessing the
	 * parameters passed in from the bootloader, which might be at any
	 * physical address, different for every platform.
	 */
	mov	r1, #0
	mov	r2, #0
	mov	r3, #4096		/* 4096 x 1MiB sections == 4GiB */
	bl	build_pagetables

	/*
	 * Next we map the kernel starting at the physical load address, mapped
	 * to the VA the kernel is linked for.  The default size we map is 64MiB
	 * but it can be overridden with a kernel option.
	 */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	ldr	r3, =(LOCORE_MAP_MB)
	bl	build_pagetables

	/* Create a device mapping for early_printf if specified. */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_device_pagetables
#endif
	bl	init_mmu

	/* Transition the PC from physical to virtual addressing. */
	ldr	pc, =1f
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp,	#INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	2b

	/* Build struct arm_boot_params on the stack and call initarm(). */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]		/* second word = va(boot_pt1) */
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	/* mi_startup() must never return; panic if it does. */
	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
237
/*
 * Emit a two-word descriptor for translate_va_to_pa: the first word is the
 * link-time (virtual) address of the descriptor itself ('.'), the second is
 * the link-time address of 'table'.  Comparing word 0 against the run-time
 * address of the descriptor yields the VA->PA delta.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
242
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 *
 * Note: on entry r0 is the run-time (physical) address of the pointer,
 * obtained via pc-relative adr, while the stored word is its link-time VA.
 */
translate_va_to_pa:
	ldr	r1, [r0]		/* r1 = link-time VA of the pointer */
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
272
/*
 * Init MMU
 * r0 - the table base address (physical address of the L1 page table)
 *
 * Programs TTBR0/ASID/DACR, sets TEX remap so all memory classes resolve
 * to uncacheable during early boot, then enables the MMU.  Trashes r0.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #DOMAIN_CLIENT	/* Only domain #0 is used */
	mcr	CP15_DACR(r0)

	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA		/* PRRR: every 2-bit field = 0b10 */
	mcr	CP15_PRRR(r0)
	mov	r0, #0			/* NMRR: non-cacheable inner/outer */
	mcr	CP15_NMRR(r0)
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0,	#CPU_CONTROL_MMU_ENABLE
	orr	r0, r0,	#CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0,	#CPU_CONTROL_TR_ENABLE
	orr	r0, r0,	#CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)
317
318
/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 *
 * Returns via the saved lr popped into pc; preserves r4-r11.
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* r4 = new TTB */
	mov	r5, r1			/* r5 = ACTLR clear mask */
	mov	r6, r2			/* r6 = ACTLR set mask */

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* Only write ACTLR if it changed. */
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)
383
384
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 *
 * NOTE(review): both entry points currently use identical attributes
 * (TEX1_CLASS_0); with TEX remap set to uncacheable during early boot the
 * device/normal distinction is moot at this stage.
 */
build_device_pagetables:
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
	b	1f
build_pagetables:
	/* Set the required page attributed */
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
1:
	orr	r1, r4			/* r1 = first section entry (PA | attrs) */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)	/* r2 = byte offset of L1 entry in table */

	mov	r4, r3			/* r4 = sections remaining */
2:
	str	r1, [r0, r2]		/* store one 1MiB section entry */
	add	r2, r2, #4
	add	r1, r1, #(PTE1_SIZE)
	adds	r4, r4, #-1		/* decrement; loop while count still > 0 */
	bhi	2b

	mov	pc, lr
416
/* VA->PA descriptor for the boot L1 page table (see translate_va_to_pa). */
VA_TO_PA_POINTER(Lpagetable, boot_pt1)


.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	/* One INIT_ARM_STACK_SIZE-byte startup stack slot per CPU. */
	.space	INIT_ARM_STACK_SIZE * MAXCPU
432
/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable", "aw", %nobits
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	/* Back to code; restore default alignment for what follows. */
	.text
	.align	2
445
#if defined(SMP)

/*
 * Secondary (AP) CPU entry point.  Leaves HYP mode if necessary, disables
 * caches/MMU, enables the MMU on the boot page table via init_mmu, carves
 * out this CPU's slice of the startup stack, and calls init_secondary()
 * with the CPU id (low nibble of MPIDR) in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	LEAVE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_AFLT_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_UNAL_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0,	#0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1,	r0		/* Point sp to initstack */
	add	sp, sp,	r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	/* init_secondary() must never return; panic if it does. */
	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif
503
/*
 * Halt the CPU: flush the data cache, then either jump to the platform's
 * registered reset routine (if cpu_reset_address is non-zero) or spin in
 * a low-power WFI loop forever.  Does not return.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4			/* Jump to platform reset code, if set. */
1:
	WFI				/* Otherwise idle forever. */
	b	1b

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)
526
527
/*
 * setjump + longjmp
 *
 * Kernel-internal setjmp: saves r4-r14 (callee-saved regs, sp, lr) into
 * the buffer at r0 and returns 0.  A later longjmp on the same buffer
 * returns 1 from this call site.
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}		/* Save r4-r12, sp, lr into buffer. */
	mov	r0, #0x00000000		/* Direct return yields 0. */
	RET
END(setjmp)
536
/*
 * Restore the register state saved by setjmp from the buffer at r0 and
 * return 1 at the original setjmp call site (lr is restored from the
 * buffer, so RET goes back to the setjmp caller).
 */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}		/* Restore r4-r12, sp, lr. */
	mov	r0, #0x00000001		/* setjmp appears to return 1. */
	RET
END(longjmp)
542
	.data
	.global	_C_LABEL(esym)
/* End-of-symbols marker; initialized to the end of the kernel image. */
_C_LABEL(esym):	.word	_C_LABEL(end)
546
/* abort() stub: deliberately branches to itself (infinite loop). */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
550
/*
 * Signal trampoline.  Invokes sigreturn(2) with a pointer to the ucontext
 * at sp+SIGF_UC; if that somehow returns, calls exit(2), and as a last
 * resort loops back to retry sigreturn.  The syscall numbers live in the
 * two .word slots just past END(sigcode), reached pc-relative below.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit
577
	.align	2
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	/* Byte length of the trampoline, including the trailing .word data. */
	.long esigcode-sigcode
586
587/* End of locore.S */
588