/* locore-v4.S revision 265694 */
1/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/
2
3/*-
4 * Copyright 2011 Semihalf
5 * Copyright (C) 1994-1997 Mark Brinicombe
6 * Copyright (C) 1994 Brini
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Brini.
20 * 4. The name of Brini may not be used to endorse or promote products
21 *    derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36#include "assym.s"
37#include <sys/syscall.h>
38#include <machine/asm.h>
39#include <machine/armreg.h>
40#include <machine/pte.h>
41
42__FBSDID("$FreeBSD: head/sys/arm/arm/locore.S 265694 2014-05-08 18:36:42Z ian $");
43
/* What size should this really be ? It is only used by initarm() */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

/*
 * In ARM state the PC reads as the current instruction + 8, so
 * "sub pc, pc, #4" branches to the very next instruction.  The jump
 * flushes the prefetch pipeline after a CP15 write.
 */
#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

/*
 * Wait for a CP15 operation to complete on CPUs that need it:
 * read back an arbitrary CP15 register, stall on the result, then
 * branch to flush the pipeline.  Clobbers 'tmp'.
 */
#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
54
/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
/* Export the configured virtual and physical base addresses as symbols. */
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR
65
66/*
67 * On entry for FreeBSD boot ABI:
68 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
69 *	r1 - if (r0 == 0) then metadata pointer
70 * On entry for Linux boot ABI:
71 *	r0 - 0
72 *	r1 - machine type (passed as arg2 to initarm)
 * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
74 *
75 * For both types of boot we gather up the args, put them in a struct arm_boot_params
76 * structure and pass that to initarm.
77 */
ENTRY_NP(btext)
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	/* Stash the bootloader-supplied arguments in regs not used below. */
	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc	p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls 	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
	/*
	 * Running from flash: copy the image to RAM, then jump to the
	 * RAM copy at the address corresponding to from_ram.
	 */
	ldr	r7, =KERNBASE
	adr	r1, _start		/* src = where we execute now */
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0		/* r2 = image size (_edata - _start) */
	sub	r0, r0, r7		/* linked address -> offset ... */
	add	r0, r0, r6		/* ... offset -> RAM destination */
	mov	r4, r0			/* save dest across the memcpy call */
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0		/* jump into the RAM copy */
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	/* Compute the physical address of Lunmapped for the MMU-off jump. */
	adr	r7, Lunmapped
	bic	r7, r7, #0xf0000000
	orr	r7, r7, #PHYSADDR


disable_mmu:
	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	/* Let the control register write take effect before jumping. */
	nop
	nop
	nop
	mov	pc, r7			/* continue at the physical alias */
Lunmapped:
	/*
	 * Build page table from scratch.
	 */

	/* Find the delta between VA and PA */
	adr	r0, Lpagetable		/* r0 = pa(Lpagetable), MMU is off */
	ldr	r1, [r0]		/* r1 = link-time va(Lpagetable) */
	sub	r2, r1, r0
	/* At this point: r2 = VA - PA */

	/*
	 * Find the physical address of the table. After these two
	 * instructions:
	 * r1 = va(pagetable)
	 *
	 * r0 = va(pagetable) - (VA - PA)
	 *    = va(pagetable) - VA + PA
	 *    = pa(pagetable)
	 */
	ldr	r1, [r0, #4]
	sub	r0, r1, r2

	/*
	 * Map PA == VA
	 */
	/* Find the start kernels load address */
	adr	r5, _start
	ldr	r2, =(L1_S_OFFSET)
	bic	r5, r2			/* round down to a 1MiB boundary */
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNVIRTADDR)
	bl	build_pagetables

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map (early SoC device window, e.g. for a UART) */
	ldr	r1, =SOCDEV_PA
	ldr	r2, =SOCDEV_VA
	bl	build_pagetables
#endif

#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

mmu_done:
	nop
	/* Zero the bss (_edata .. _ebss) and load the initial stack pointer. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* get zero init data */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done		/* absolute jump into the kernel VA map */

virt_done:
	/* Build a struct arm_boot_params on the stack and call initarm(). */
	mov	r1, #28			/* loader info size is 28 bytes also second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	mov	r0, sp			/* loader info pointer is first arg */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	str	r5, [r0, #20]		/* store the physical address */
	adr	r4, Lpagetable		/* load the pagetable address */
	ldr	r5, [r4, #4]
	str	r5, [r0, #24]		/* store the pagetable address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* init arm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */

	/* mi_startup() must never return; panic if it does. */
	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(btext)
END(_start)
266
/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#if defined(SMP)
	orr	r4, #(L1_SHARED)
#endif
	orr	r1, r4			/* r1 = first L1 section descriptor */

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(L1_S_SHIFT - 2)	/* r2 = byte offset of the L1 entry */

	mov	r4, r3			/* r4 = sections remaining */
1:
	str	r1, [r0, r2]		/* store descriptor into the table */
	add	r2, r2, #4		/* advance to the next L1 entry */
	add	r1, r1, #(L1_S_SIZE)	/* advance PA by one 1MiB section */
	adds	r4, r4, #-1
	bhi	1b			/* repeat while sections remain */

	RET
297
/*
 * Literal pool: values reachable with pc-relative loads from the code
 * above, usable both before and after the MMU is enabled.
 */
Lpagetable:
	.word	.			/* link-time VA of this word */
	.word	pagetable		/* VA of the initial L1 page table */

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata

#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable		/* page table used by secondary CPUs */
#endif

.Lstart:
	.word	_edata			/* start of region zeroed at mmu_done */
	.word	_ebss			/* end of that region */
	.word	svcstk + INIT_ARM_STACK_SIZE	/* top of the initial stack */

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0
331
	.bss
/* Initial kernel stack, used until initarm() provides a new one. */
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE
344
	.text
	.align	0

/* Pointer to the cpufuncs dispatch table (used for cache maintenance). */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)
#if defined(SMP)

/*
 * Secondary-CPU entry point.  Secondary cores start here with the MMU
 * off; we repeat the MMU setup done in _start (using the page table
 * published in Lstartup_pagetable_secondary), carve a per-CPU slice out
 * of the initial stack, and call init_secondary().
 */
ASENTRY_NP(mpentry)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7


	/* Compute the physical address of Ltag (we run with the MMU off). */
	adr     r7, Ltag
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	/* Let the control register write take effect. */
	nop
	nop
	nop

Ltag:
	/* Load the physical address of the secondary startup page table. */
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	/* Give each CPU its own 2KiB slice of the initial stack. */
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	mrc	p15, 0, r0, c0, c0, 5	/* read CPU id register */
	and	r0, r0, #15		/* r0 = CPU number (low 4 bits) */
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2		/* sp = stack top - cpuid * 2048 */
	str	r1, [sp]
	ldr	pc, .Lmpvirt_done	/* absolute jump into the VA map */

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	/* init_secondary() must never return; panic if it does. */
	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif
433
/*
 * cpu_halt: flush caches, optionally disable the MMU, and jump to the
 * board-specific reset address.  Does not return.
 */
ENTRY_NP(cpu_halt)
	/* Force SVC32 mode with IRQs and FIQs masked. */
	mrs     r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr     r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr     cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]		/* r4 = address to jump to for reset */

	/* Write back and invalidate the I/D and L2 caches (indirect calls). */
	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0			/* flags consumed by mcrne below */
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr     15, 0, r0, c1, c0, 0
	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov     pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off
	 * This variable is provided by the hardware specific code
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal instruction
	 * on f.e. ARM6/7 that locks up the computer in an endless illegal
	 * instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)
486
487
488/*
489 * setjump + longjmp
490 */
/*
 * Save r4-r14 (callee-saved registers, sp and lr) into the buffer at
 * r0 and return 0.
 */
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
496
/*
 * Restore r4-r14 saved by setjmp from the buffer at r0, then return 1
 * (resuming at the matching setjmp call site via the restored lr).
 */
ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
502
	.data
	.global _C_LABEL(esym)
/* esym: initialized to 'end' — presumably the linker-provided end-of-image
 * symbol; TODO(review) confirm against the linker script. */
_C_LABEL(esym):	.word	_C_LABEL(end)
506
/* abort: spin forever by branching to itself.  Never returns. */
ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)
510
/*
 * Signal trampoline.  NOTE(review): the sigcode/esigcode/szsigcode symbols
 * suggest this code is copied out to user space — confirm against the
 * machine-dependent signal delivery code.
 */
ENTRY_NP(sigcode)
	mov	r0, sp
	add	r0, r0, #SIGF_UC	/* r0 = sp + SIGF_UC, arg to sigreturn */

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well if that failed we better exit quick ! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16

	/* Syscall numbers, loaded pc-relative above; layout is load-bearing. */
	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode	/* size of the trampoline in bytes */
END(sigcode)
547/* End of locore.S */
548