/* locore-v6.S revision 290647 */
/*-
 * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
 * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30
31#include "assym.s"
32#include <sys/syscall.h>
33#include <machine/asm.h>
34#include <machine/asmacros.h>
35#include <machine/armreg.h>
36#include <machine/sysreg.h>
37#include <machine/cpuconf.h>
38#include <machine/pte.h>
39
40__FBSDID("$FreeBSD: head/sys/arm/arm/locore-v6.S 290647 2015-11-10 11:45:41Z mmel $");
41
#ifndef ARM_NEW_PMAP
/*
 * With the classic pmap, alias the new-pmap PTE1 (1MiB section) names to
 * the equivalent classic L1 section constants so the code below can use
 * one set of names regardless of which pmap is configured.
 */
#define	PTE1_OFFSET	L1_S_OFFSET
#define	PTE1_SHIFT	L1_S_SHIFT
#define	PTE1_SIZE	L1_S_SIZE
#endif
47
#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
 * HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
 * when enabled. llvm >= 3.6 supports it too.
 */
.arch_extension virt
#define	MSR_ELR_HYP(regnum)	msr	elr_hyp, lr
#define	ERET	eret
#else
/*
 * Older toolchains do not know the virtualization-extension mnemonics;
 * emit the instructions as raw opcode words instead (msr elr_hyp, <Rn>
 * with the register number OR'd into the encoding, and eret).
 */
#define MSR_ELR_HYP(regnum) .word (0xe12ef300 | regnum)
#define ERET .word 0xe160006e
#endif
#endif /* __ARM_ARCH >= 7 */
62
/* A small statically-allocated stack used only during initarm() and AP startup. */
#define	INIT_ARM_STACK_SIZE	2048

	.text
	.align	2
68
#if __ARM_ARCH >= 7
/*
 * If the CPU was entered in HYP (hypervisor) mode, drop to SVC32 mode with
 * IRQ/FIQ/Aborts masked by setting up SPSR/ELR_hyp and executing ERET to
 * the local label 1:.  If not in HYP mode this is a no-op (branch to 1:).
 * Clobbers r0 and lr.
 */
#define	LEAVE_HYP							\
	/* Leave HYP mode */						;\
	mrs	r0, cpsr						;\
	and	r0, r0, #(PSR_MODE)   /* Mode is in the low 5 bits of CPSR */ ;\
	teq	r0, #(PSR_HYP32_MODE) /* Hyp Mode? */			;\
	bne	1f							;\
	/* Ensure that IRQ, FIQ and Aborts will be disabled after eret */ ;\
	mrs	r0, cpsr						;\
	bic	r0, r0, #(PSR_MODE)					;\
	orr	r0, r0, #(PSR_SVC32_MODE)				;\
	orr	r0, r0, #(PSR_I | PSR_F | PSR_A)			;\
	msr	spsr_cxsf, r0						;\
	/* Exit hypervisor mode */					;\
	adr	lr, 1f							;\
	MSR_ELR_HYP(14)							;\
	ERET								;\
1:
#else
#define	LEAVE_HYP
#endif /* __ARM_ARCH >= 7 */
90
/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
 *
 * For both types of boot we gather up the args, put them in a struct arm_boot_params
 * structure and pass that to initarm.
 *
 * Register usage below: r8-r11 preserve the bootloader-supplied r0-r3,
 * r5 holds the kernel's physical load address (1MiB-aligned).
 */
	.globl	btext
btext:
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Make sure interrupts are disabled. */
	cpsid	ifa

	mov	r8, r0		/* 0 or boot mode from boot2 */
	mov	r9, r1		/* Save Machine type */
	mov	r10, r2		/* Save meta data */
	mov	r11, r3		/* Future expansion */

	LEAVE_HYP

	/*
	 * Check whether data cache is enabled.  If it is, then we know
	 * current tags are valid (not power-on garbage values) and there
	 * might be dirty lines that need cleaning.  Disable cache to prevent
	 * new lines being allocated, then call wbinv_poc_all to clean it.
	 */
	mrc	CP15_SCTLR(r7)
	tst	r7, #CPU_CONTROL_DC_ENABLE
	blne	dcache_wbinv_poc_all

	/* ! Do not write to memory between wbinv and disabling cache ! */

	/*
	 * Now there are no dirty lines, but there may still be lines marked
	 * valid.  Disable all caches and the MMU, and invalidate everything
	 * before setting up new page tables and re-enabling the mmu.
	 */
1:
	bic	r7, #CPU_CONTROL_DC_ENABLE
	bic	r7, #CPU_CONTROL_MMU_ENABLE
	bic	r7, #CPU_CONTROL_IC_ENABLE
	bic	r7, #CPU_CONTROL_UNAL_ENABLE
	bic	r7, #CPU_CONTROL_BPRD_ENABLE
	bic	r7, #CPU_CONTROL_SW_ENABLE
	orr	r7, #CPU_CONTROL_AFLT_ENABLE
	orr	r7, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r7)
	DSB
	ISB
	bl	dcache_inv_poc_all
	mcr	CP15_ICIALLU		/* Invalidate entire I-cache */
	DSB
	ISB

	/*
	 * Build page table from scratch.
	 */

	/* Calculate the physical address of the startup pagetable. */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	/* Clear boot page table */
	mov	r1, r0
	mov	r2, L1_TABLE_SIZE
	mov	r3,#0
1:	str	r3, [r1], #4
	subs	r2, #4
	bgt	1b

	/*
	 * Map PA == VA
	 */
	/* Find the start kernels load address */
	adr	r5, _start
	ldr	r2, =(PTE1_OFFSET)
	bic	r5, r2			/* Round down to a 1MiB boundary */
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map (1MB) used for early_printf(). */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	mov	r3, #1
	bl	build_pagetables
#endif
	bl	init_mmu

	/* Switch to virtual addresses. */
	ldr	pc, =1f			/* Literal holds the VA of label 1: */
1:

	/* Setup stack, clear BSS */
	ldr	r1, =.Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	add	sp, sp,	#INIT_ARM_STACK_SIZE
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
2:
	str	r3, [r1], #0x0004	/* zero the bss, word at a time */
	subs	r2, r2, #4
	bgt	2b

	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r8, [r0, #4]		/* Store r0 from boot loader */
	str	r9, [r0, #8]		/* Store r1 from boot loader */
	str	r10, [r0, #12]		/* store r2 from boot loader */
	str	r11, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)	/* call mi_startup()! */

	ldr	r0, =.Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(_start)
235
/*
 * Emit a two-word "magic" descriptor: the link-time address of the
 * descriptor itself (".") followed by the link-time address of `table'.
 * translate_va_to_pa uses the difference between the stored "." and the
 * run-time (adr) address of the descriptor to compute the VA-to-PA delta.
 */
#define VA_TO_PA_POINTER(name, table)	 \
name:					;\
	.word	.			;\
	.word	table
240
/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
	ldr	r1, [r0]		/* r1 = link-time (virtual) address of pointer */
	sub	r2, r1, r0		/* r0 was its run-time (physical) address */
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2
	mov	pc, lr
270
/*
 * Init MMU
 * r0 - the table base address
 *
 * Programs TTBR0/ASID/DACR (and TEX remap for the new pmap), flushes the
 * TLB, then turns the MMU on in SCTLR.  Trashes r0.  Must be called with
 * caches/MMU already disabled; returns with the MMU enabled.
 */

ASENTRY_NP(init_mmu)

	/* Setup TLB and MMU registers */
	mcr	CP15_TTBR0(r0)		/* Set TTB */
	mov	r0, #0
	mcr	CP15_CONTEXTIDR(r0)	/* Set ASID to 0 */

	/* Set the Domain Access register */
	mov	r0, #((DOMAIN_CLIENT <<	(PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	CP15_DACR(r0)

#ifdef ARM_NEW_PMAP
	/*
	 * Set TEX remap registers
	 *  - All is set to uncacheable memory
	 */
	ldr	r0, =0xAAAAA
	mcr	CP15_PRRR(r0)
	mov	r0, #0
	mcr	CP15_NMRR(r0)
#endif
	mcr	CP15_TLBIALL		/* Flush TLB */
	DSB
	ISB

	/* Enable MMU */
	mrc	CP15_SCTLR(r0)
	orr	r0, r0,	#CPU_CONTROL_MMU_ENABLE
	orr	r0, r0,	#CPU_CONTROL_V6_EXTPAGE
#ifdef ARM_NEW_PMAP
	orr	r0, r0,	#CPU_CONTROL_TR_ENABLE
#endif
	orr	r0, r0,	#CPU_CONTROL_AF_ENABLE
	mcr	CP15_SCTLR(r0)
	DSB
	ISB
	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

	mov	pc, lr
END(init_mmu)
319
320
/*
 * Init SMP coherent mode, enable caching and switch to final MMU table.
 * Called with disabled caches
 * r0 - The table base address
 * r1 - clear bits for aux register
 * r2 - set bits for aux register
 *
 * Sequence: invalidate caches, update ACTLR (only if the value actually
 * changes), enable D/I caches and branch prediction, switch TTBR0 to the
 * final table, flush TLB/BP, then invalidate caches again.
 */
ASENTRY_NP(reinit_mmu)
	push	{r4-r11, lr}
	mov	r4, r0			/* r4 = new TTB */
	mov	r5, r1			/* r5 = ACTLR clear mask */
	mov	r6, r2			/* r6 = ACTLR set mask */

	/* !! Be very paranoid here !! */
	/* !! We cannot write single bit here !! */

#if 0	/* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Set auxiliary register */
	mrc	CP15_ACTLR(r7)
	bic	r8, r7, r5		/* Mask bits */
	eor 	r8, r8, r6		/* Set bits */
	teq 	r7, r8
	mcrne 	CP15_ACTLR(r8)		/* Write only when the value changed */
	DSB
	ISB

	/* Enable caches. */
	mrc	CP15_SCTLR(r7)
	orr	r7, #CPU_CONTROL_DC_ENABLE
	orr	r7, #CPU_CONTROL_IC_ENABLE
	orr	r7, #CPU_CONTROL_BPRD_ENABLE
	mcr	CP15_SCTLR(r7)
	DSB

	mcr	CP15_TTBR0(r4)		/* Set new TTB */
	DSB
	ISB

	mcr	CP15_TLBIALL		/* Flush TLB */
	mcr	CP15_BPIALL		/* Flush Branch predictor */
	DSB
	ISB

#if 0 /* XXX writeback shouldn't be necessary */
	/* Write back and invalidate all integrated caches */
	bl 	dcache_wbinv_poc_all
#else
	bl	dcache_inv_pou_all
#endif
	mcr	CP15_ICIALLU
	DSB
	ISB

	pop	{r4-r11, pc}
END(reinit_mmu)
385
386
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 *
 * Writes r3 consecutive L1 section entries (1MiB each) mapping VA r2..
 * onto PA r1.., with kernel-RW, cacheable attributes (plus shared on SMP,
 * or the PTE1/TEX class form with the new pmap).
 */
build_pagetables:
	/* Set the required page attributed */
#if defined(ARM_NEW_PMAP)
	ldr	r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
#elif defined(SMP)
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED)
#else
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#endif
	orr	r1, r4			/* r1 = PA | section attributes */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(PTE1_SHIFT - 2)	/* r2 = byte offset of entry in table */

	mov	r4, r3			/* r4 = remaining section count */
1:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(PTE1_SIZE)
	adds	r4, r4, #-1
	bhi	1b

	mov	pc, lr
420
/* VA/PA descriptor for the boot L1 page table (see translate_va_to_pa). */
VA_TO_PA_POINTER(Lpagetable, boot_pt1)


.Lstart:
	.word	_edata			/* Note that these three items are */
	.word	_ebss			/* loaded with a single ldmia and */
	.word	svcstk			/* must remain in order together. */

.Lmainreturned:
	.asciz	"main() returned"
	.align	2

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE * MAXCPU	/* one startup stack per CPU */

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
	.globl	boot_pt1
boot_pt1:
	.space	L1_TABLE_SIZE

	.text
	.align	2

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
452
#if defined(SMP)

/*
 * Secondary-CPU (AP) entry point.  Mirrors the relevant parts of _start:
 * disable interrupts/caches, leave HYP mode, enable the MMU with the boot
 * page table, pick this CPU's slice of the svcstk startup stack, switch
 * to virtual addresses and call init_secondary() with the CPU id in r0.
 */
ASENTRY_NP(mpentry)
	/* Make sure interrupts are disabled. */
	cpsid	ifa

	LEAVE_HYP

	/* Setup core, disable all caches. */
	mrc	CP15_SCTLR(r0)
	bic	r0, #CPU_CONTROL_MMU_ENABLE
	bic	r0, #CPU_CONTROL_DC_ENABLE
	bic	r0, #CPU_CONTROL_IC_ENABLE
	bic	r0, #CPU_CONTROL_UNAL_ENABLE
	bic	r0, #CPU_CONTROL_BPRD_ENABLE
	bic	r0, #CPU_CONTROL_SW_ENABLE
	orr	r0, #CPU_CONTROL_AFLT_ENABLE
	orr	r0, #CPU_CONTROL_VECRELOC
	mcr	CP15_SCTLR(r0)
	DSB
	ISB

	/* Invalidate L1 cache I+D cache */
	bl	dcache_inv_pou_all
	mcr	CP15_ICIALLU
	DSB
	ISB

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable
	bl	translate_va_to_pa

	bl	init_mmu

	adr	r1, .Lstart+8		/* Get initstack pointer from */
	ldr	sp, [r1]		/* startup data. */
	mrc	CP15_MPIDR(r0)		/* Get processor id number. */
	and	r0, r0,	#0x0f
	mov	r1, #INIT_ARM_STACK_SIZE
	mul	r2, r1,	r0		/* Point sp to initstack */
	add	sp, sp,	r2		/* area for this processor. */

	/* Switch to virtual addresses. */
	ldr	pc, =1f
1:
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(mpentry)

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	2
#endif
510
/*
 * Halt the CPU: clean/invalidate the data cache, then jump to the
 * platform-provided reset address if one is set, otherwise spin in WFI.
 * Does not return.
 */
ENTRY_NP(cpu_halt)

	/* XXX re-implement !!! */
	cpsid	ifa
	bl	dcache_wbinv_poc_all

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]
	teq	r4, #0
	movne	pc, r4			/* Jump to platform reset code */
1:
	WFI
	b	1b
	
	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)
END(cpu_halt)
533
534
/*
 * setjump + longjmp
 * setjmp(env): save r4-r12, sp, lr into the 11-word env at r0; return 0.
 * longjmp(env): restore r4-r12, sp, lr from env; "return" 1 from setjmp.
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}		/* save callee-saved regs, sp, lr */
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}		/* restore regs saved by setjmp */
	mov	r0, #0x00000001
	RET
END(longjmp)
549
	.data
	/* esym: address of the end of the kernel symbol table (initially _end). */
	.global	_C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

/* abort(): branch-to-self; hangs the CPU if ever reached. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
557
/*
 * Signal trampoline, copied onto the user stack.  Calls sigreturn(&uc)
 * via swi; on failure calls exit and retries.  The syscall numbers live
 * in the two literal words immediately after the code and are reached
 * with pc-relative loads, so the layout here must not change.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = &frame->sf_uc */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
END(sigcode)
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	2
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	/* szsigcode: size in bytes of the sigcode trampoline above. */
	.global szsigcode
szsigcode:
	.long esigcode-sigcode

/* End of locore.S */
595