locore-v4.S revision 261651
/*	$NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $	*/

/*-
 * Copyright 2011 Semihalf
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "assym.s"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/pte.h>

__FBSDID("$FreeBSD: head/sys/arm/arm/locore.S 261651 2014-02-09 12:52:39Z andrew $");

/* What size should this really be? It is only used by initarm(). */
#define INIT_ARM_STACK_SIZE	(2048 * 4)

#define	CPWAIT_BRANCH							 \
	sub	pc, pc, #4

#define	CPWAIT(tmp)							 \
	mrc	p15, 0, tmp, c2, c0, 0	/* arbitrary read of CP15 */	;\
	mov	tmp, tmp		/* wait for it to complete */	;\
	CPWAIT_BRANCH			/* branch to next insn */
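
/*
 * Note on CPWAIT: on older cores (XScale in particular) a CP15 write is not
 * guaranteed to have taken effect by the time the next instruction issues.
 * The arbitrary CP15 read, register-to-register mov and pipeline-flushing
 * branch above are the conventional "cpwait" sequence used to drain the
 * pipeline before relying on the new CP15 state.
 */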

/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
	.align	0
.globl kernbase
.set kernbase,KERNBASE
.globl physaddr
.set physaddr,PHYSADDR

/*
 * On entry for FreeBSD boot ABI:
 *	r0 - metadata pointer or 0 (boothowto on AT91's boot2)
 *	r1 - if (r0 == 0) then metadata pointer
 * On entry for Linux boot ABI:
 *	r0 - 0
 *	r1 - machine type (passed as arg2 to initarm)
 *	r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
 *
 * For both types of boot we gather up the args, put them in a
 * struct arm_boot_params and pass that to initarm.
 */
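/*
 * For reference, the 24-byte block assembled on the stack at virt_done below
 * has this shape (an illustrative sketch only; the authoritative definition
 * of struct arm_boot_params lives in the machine headers):
 *
 *	struct arm_boot_params {
 *		register_t	abp_size;	-- length of this struct (24)
 *		register_t	abp_r0;		-- r0 from the boot loader
 *		register_t	abp_r1;		-- r1 from the boot loader
 *		register_t	abp_r2;		-- r2 from the boot loader
 *		register_t	abp_r3;		-- r3 from the boot loader
 *		register_t	abp_physaddr;	-- kernel physical load address
 *	};
 */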
ENTRY_NP(btext)
ASENTRY_NP(_start)
	STOP_UNWINDING		/* Can't unwind into the bootloader! */

	mov	r9, r0		/* 0 or boot mode from boot2 */
	mov	r8, r1		/* Save Machine type */
	mov	ip, r2		/* Save meta data */
	mov	fp, r3		/* Future expansion */

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7

#if defined (FLASHADDR) && defined(LOADERRAMADDR)
	/* Check if we're running from flash. */
	ldr	r7, =FLASHADDR
	/*
	 * If we're running with MMU disabled, test against the
	 * physical address instead.
	 */
	mrc     p15, 0, r2, c1, c0, 0
	ands	r2, r2, #CPU_CONTROL_MMU_ENABLE
	ldreq	r6, =PHYSADDR
	ldrne	r6, =LOADERRAMADDR
	cmp	r7, r6
	bls 	flash_lower
	cmp	r7, pc
	bhi	from_ram
	b	do_copy

flash_lower:
	cmp	r6, pc
	bls	from_ram
do_copy:
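	/*
	 * Running from flash: copy the kernel image into RAM and continue
	 * there.  r1 is the current (flash) address of _start, r2 the image
	 * size up to _edata, and r0 the destination, i.e. the link address
	 * of _start rebased from KERNBASE onto the RAM base held in r6.
	 * After the memcpy we jump to the RAM copy of from_ram.
	 */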
	ldr	r7, =KERNBASE
	adr	r1, _start
	ldr	r0, Lreal_start
	ldr	r2, Lend
	sub	r2, r2, r0
	sub	r0, r0, r7
	add	r0, r0, r6
	mov	r4, r0
	bl	memcpy
	ldr	r0, Lram_offset
	add	pc, r4, r0
Lram_offset:	.word from_ram-_C_LABEL(_start)
from_ram:
	nop
#endif
	adr	r7, Lunmapped
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR


disable_mmu:
	/* Disable MMU for a while */
	mrc     p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr     p15, 0, r2, c1, c0, 0

	nop
	nop
	nop
	mov	pc, r7
Lunmapped:
	/*
	 * Build the page table from scratch.
	 */

	/*
	 * Find the startup page table's physical address: take its link-time
	 * (virtual) address and subtract the kernel's VA/PA offset.
	 */
	ldr	r1, Lstartup_pagetable
	ldr	r2, =(KERNVIRTADDR - KERNPHYSADDR)
	sub	r0, r1, r2
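	/*
	 * r0 now holds the physical address of the table.  The same offset is
	 * used throughout early boot: the kernel is linked at KERNVIRTADDR
	 * but loaded at KERNPHYSADDR, so pa = va - (KERNVIRTADDR - KERNPHYSADDR).
	 */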

	/*
	 * Map PA == VA
	 */
	ldr	r5, =(PHYSADDR)
	mov	r1, r5
	mov	r2, r5
	/* Map 64MiB, preserved over calls to build_pagetables */
	mov	r3, #64
	bl	build_pagetables

	/* Create the kernel map to jump to */
	mov	r1, r5
	ldr	r2, =(KERNBASE)
	bl	build_pagetables
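
	/*
	 * Two mappings of the same physical memory now exist: an identity
	 * (VA == PA) map, so that the instructions executed immediately after
	 * the MMU is switched on can still be fetched, and the final KERNBASE
	 * map that the kernel runs from once we jump to virt_done.
	 */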

#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create the custom map */
	ldr	r1, =SOCDEV_VA
	ldr	r2, =SOCDEV_PA
	bl	build_pagetables
#endif

#if defined(SMP)
	orr 	r0, r0, #2		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	/* Set the Domain Access register.  Very important! */
	mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
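	/*
	 * DOMAIN_CLIENT (0b01) in both domain 0 and the kernel's pmap domain
	 * makes the MMU enforce the access-permission bits in each
	 * descriptor; "no access" would fault everything and "manager" would
	 * bypass the AP bits entirely.
	 */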
	/*
	 * Enable MMU.
	 * On armv6 enable extended page tables, and set alignment checking
	 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd
	 * instructions emitted by clang.
	 */
	mrc	p15, 0, r0, c1, c0, 0
#ifdef _ARM_ARCH_6
	orr	r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AFLT_ENABLE)
	orr	r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

mmu_done:
	nop
	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack and */
	sub	r2, r2, r1		/* get zero init data */
	mov	r3, #0
.L1:
	str	r3, [r1], #0x0004	/* zero the bss */
	subs	r2, r2, #4
	bgt	.L1
	ldr	pc, .Lvirt_done
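	/*
	 * .Lvirt_done holds the link-time (KERNBASE-relative) address of
	 * virt_done, so this load into pc is what actually moves execution
	 * from the identity mapping onto the kernel's virtual mapping.
	 */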

virt_done:
	mov	r1, #24			/* loader info size is 24 bytes; also the second arg */
	subs	sp, sp, r1		/* allocate arm_boot_params struct on stack */
	bic	sp, sp, #7		/* align stack to 8 bytes */
	mov	r0, sp			/* loader info pointer is first arg */
	str	r1, [r0]		/* Store length of loader info */
	str	r9, [r0, #4]		/* Store r0 from boot loader */
	str	r8, [r0, #8]		/* Store r1 from boot loader */
	str	ip, [r0, #12]		/* store r2 from boot loader */
	str	fp, [r0, #16]		/* store r3 from boot loader */
	ldr	r5, =KERNPHYSADDR	/* load KERNPHYSADDR as the physical address */
	str	r5, [r0, #20]		/* store the physical address */
	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(initarm)	/* Off we go */

	/* initarm will return the new stack pointer. */
	mov	sp, r0

	bl	_C_LABEL(mi_startup)		/* call mi_startup()! */

	adr	r0, .Lmainreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */
END(btext)
END(_start)

/*
 * Builds the page table
 * r0 - The table base address
 * r1 - The physical address (trashed)
 * r2 - The virtual address (trashed)
 * r3 - The number of 1MiB sections
 * r4 - Trashed
 *
 * Addresses must be 1MiB aligned
 */
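/*
 * Roughly equivalent C, for illustration only (one 4-byte L1 section
 * descriptor per 1MiB of address space; names as used in the code below):
 *
 *	uint32_t *l1 = (uint32_t *)r0;
 *	uint32_t desc = r1 | L1_TYPE_S | L1_S_C | L1_S_AP(AP_KRW);  (| L1_SHARED on SMP)
 *	for (i = 0; i < r3; i++)
 *		l1[(r2 >> L1_S_SHIFT) + i] = desc + i * L1_S_SIZE;
 */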
build_pagetables:
	/* Set the required page attributes */
	ldr	r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
#if defined(SMP)
	orr	r4, #(L1_SHARED)
#endif
	orr	r1, r4

	/* Move the virtual address to the correct bit location */
	lsr	r2, #(L1_S_SHIFT - 2)

	mov	r4, r3
1:
	str	r1, [r0, r2]
	add	r2, r2, #4
	add	r1, r1, #(L1_S_SIZE)
	adds	r4, r4, #-1
	bhi	1b

	RET

Lvirtaddr:
	.word	KERNVIRTADDR
Lphysaddr:
	.word	KERNPHYSADDR
Lreal_start:
	.word	_start
Lend:
	.word	_edata
Lstartup_pagetable:
	.word	pagetable
#ifdef SMP
Lstartup_pagetable_secondary:
	.word	temp_pagetable
#endif

.Lstart:
	.word	_edata
	.word	_ebss
	.word	svcstk + INIT_ARM_STACK_SIZE
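
/*
 * The three words at .Lstart are consumed by the ldmia at mmu_done (and by
 * mpentry): start of bss (_edata), end of bss (_ebss) and the top of the
 * initial kernel stack, in that order.
 */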

.Lvirt_done:
	.word	virt_done
#if defined(SMP)
.Lmpvirt_done:
	.word	mpvirt_done
#endif

.Lmainreturned:
	.asciz	"main() returned"
	.align	0

	.bss
svcstk:
	.space	INIT_ARM_STACK_SIZE

/*
 * Memory for the initial pagetable. We cannot place this in the bss,
 * because the bss is zeroed after this table has already been loaded
 * into the TTB.
 */
	.section ".init_pagetable"
	.align	14 /* 16KiB aligned */
pagetable:
	.space	L1_TABLE_SIZE
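
/*
 * An L1 translation table is 4096 word-sized section entries (L1_TABLE_SIZE,
 * 16KiB), and the base address programmed into the TTB must be 16KiB
 * aligned, hence the .align 14 above.  Keeping the table in its own
 * .init_pagetable section keeps it out of the bss-clearing loop that runs
 * while the table is live.
 */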

	.text
	.align	0

.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)

#if defined(SMP)
Lsramaddr:
	.word	0xffff0080

#if 0
#define	AP_DEBUG(tmp)			\
	mrc	p15, 0, r1, c0, c0, 5;	\
	ldr	r0, Lsramaddr;		\
	add	r0, r1, lsl #2;		\
	mov	r1, tmp;		\
	str	r1, [r0], #0x0000;
#else
#define AP_DEBUG(tmp)
#endif


ASENTRY_NP(mptramp)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0

	AP_DEBUG(#1)

	mrs	r3, cpsr
	bic	r3, r3, #(PSR_MODE)
	orr	r3, r3, #(PSR_SVC32_MODE)
	msr	cpsr_fsxc, r3

	mrc	p15, 0, r0, c0, c0, 5
	and	r0, #0x0f		/* Get CPU ID */

	/* Read boot address for CPU */
	mov	r1, #0x100
	mul	r2, r0, r1
	ldr	r1, Lpmureg
	add	r0, r2, r1
	ldr	r1, [r0], #0x00

	mov	pc, r1

Lpmureg:
	.word	0xd0022124
END(mptramp)

ASENTRY_NP(mpentry)

	AP_DEBUG(#2)

	/* Make sure interrupts are disabled. */
	mrs	r7, cpsr
	orr	r7, r7, #(I32_bit|F32_bit)
	msr	cpsr_c, r7


	adr     r7, Ltag
	bic     r7, r7, #0xf0000000
	orr     r7, r7, #PHYSADDR

	/* Disable MMU for a while */
	mrc	p15, 0, r2, c1, c0, 0
	bic	r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
	    CPU_CONTROL_WBUF_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_IC_ENABLE)
	bic	r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
	mcr	p15, 0, r2, c1, c0, 0

	nop
	nop
	nop

	AP_DEBUG(#3)

Ltag:
	ldr	r0, Lstartup_pagetable_secondary
	bic	r0, r0, #0xf0000000
	orr	r0, r0, #PHYSADDR
	ldr	r0, [r0]
#if defined(SMP)
	orr 	r0, r0, #0		/* Set TTB shared memory flag */
#endif
	mcr	p15, 0, r0, c2, c0, 0	/* Set TTB */
	mcr	p15, 0, r0, c8, c7, 0	/* Flush TLB */

#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	mov	r0, #0
	mcr	p15, 0, r0, c13, c0, 1	/* Set ASID to 0 */
#endif

	AP_DEBUG(#4)

	/* Set the Domain Access register.  Very important! */
	mov	r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
	mcr	p15, 0, r0, c3, c0, 0
	/* Enable MMU */
	mrc	p15, 0, r0, c1, c0, 0
#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	orr	r0, r0, #CPU_CONTROL_V6_EXTPAGE
	orr	r0, r0, #CPU_CONTROL_AF_ENABLE
#endif
	orr	r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE)
	mcr	p15, 0, r0, c1, c0, 0
	nop
	nop
	nop
	CPWAIT(r0)

	adr	r1, .Lstart
	ldmia	r1, {r1, r2, sp}	/* Set initial stack */
	mrc	p15, 0, r0, c0, c0, 5
	and	r0, r0, #15
	mov	r1, #2048
	mul	r2, r1, r0
	sub	sp, sp, r2
	str	r1, [sp]
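	/*
	 * sp now points at this CPU's own 2KiB slice of the initial kernel
	 * stack: sp = (svcstk + INIT_ARM_STACK_SIZE) - cpuid * 2048.
	 */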
	ldr	pc, .Lmpvirt_done

mpvirt_done:

	mov	fp, #0			/* trace back starts here */
	bl	_C_LABEL(init_secondary)	/* Off we go */

	adr	r0, .Lmpreturned
	b	_C_LABEL(panic)
	/* NOTREACHED */

.Lmpreturned:
	.asciz	"init_secondary() returned"
	.align	0
END(mpentry)
#endif

ENTRY_NP(cpu_halt)
	mrs     r2, cpsr
	bic	r2, r2, #(PSR_MODE)
	orr     r2, r2, #(PSR_SVC32_MODE)
	orr	r2, r2, #(I32_bit | F32_bit)
	msr     cpsr_fsxc, r2

	ldr	r4, .Lcpu_reset_address
	ldr	r4, [r4]

	ldr	r0, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r0, #CF_IDCACHE_WBINV_ALL]
	mov	lr, pc
	ldr	pc, [r0, #CF_L2CACHE_WBINV_ALL]
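
	/*
	 * The "mov lr, pc / ldr pc, [r0, #...]" pairs above are indirect
	 * calls through the cpufuncs function-pointer table: in ARM state
	 * pc reads as the current instruction + 8, so lr ends up holding
	 * the address of the instruction after the ldr, i.e. the correct
	 * return address, without needing blx.
	 */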

	/*
	 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's
	 * necessary.
	 */

	ldr	r1, .Lcpu_reset_needs_v4_MMU_disable
	ldr	r1, [r1]
	cmp	r1, #0
	mov	r2, #0

	/*
	 * MMU & IDC off, 32 bit program & data space
	 * Hurl ourselves into the ROM
	 */
	mov	r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
	mcr     15, 0, r0, c1, c0, 0
	mcrne   15, 0, r2, c8, c7, 0 	/* nail I+D TLB on ARMv4 and greater */
	mov     pc, r4

	/*
	 * _cpu_reset_address contains the address to branch to, to complete
	 * the cpu reset after turning the MMU off.
	 * This variable is provided by the hardware-specific code.
	 */
.Lcpu_reset_address:
	.word	_C_LABEL(cpu_reset_address)

	/*
	 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the
	 * v4 MMU disable instruction needs executing... it is an illegal
	 * instruction on e.g. ARM6/7 that locks up the computer in an endless
	 * illegal instruction / data-abort / reset loop.
	 */
.Lcpu_reset_needs_v4_MMU_disable:
	.word	_C_LABEL(cpu_reset_needs_v4_MMU_disable)
END(cpu_halt)


/*
 * setjmp + longjmp
 */
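/*
 * Minimal in-kernel setjmp/longjmp: r4-r14 (the callee-saved registers plus
 * sp and lr) are saved to and restored from the buffer passed in r0.
 * setjmp returns 0; a longjmp resumes at the saved lr with r0 == 1.
 */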
ENTRY(setjmp)
	stmia	r0, {r4-r14}
	mov	r0, #0x00000000
	RET
END(setjmp)

ENTRY(longjmp)
	ldmia	r0, {r4-r14}
	mov	r0, #0x00000001
	RET
END(longjmp)

	.data
	.global _C_LABEL(esym)
_C_LABEL(esym):	.word	_C_LABEL(end)

ENTRY_NP(abort)
	b	_C_LABEL(abort)
END(abort)

ENTRY_NP(sigcode)
	mov	r0, sp

	/*
	 * Call the sigreturn system call.
	 *
	 * We have to load r7 manually rather than using
	 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
	 * correct. Using the alternative places esigcode at the address
	 * of the data rather than the address one past the data.
	 */

	ldr	r7, [pc, #12]	/* Load SYS_sigreturn */
	swi	SYS_sigreturn

	/* Well, if that failed we'd better exit quickly! */

	ldr	r7, [pc, #8]	/* Load SYS_exit */
	swi	SYS_exit

	/* Branch back to retry SYS_sigreturn */
	b	. - 16
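	/*
	 * Offset check: in ARM state pc reads as the current instruction + 8,
	 * so [pc, #12] at the first ldr and [pc, #8] at the second both land
	 * on the two .word constants below, and "b . - 16" branches back to
	 * the start of the sequence to retry the sigreturn swi if SYS_exit
	 * somehow returns.
	 */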

	.word	SYS_sigreturn
	.word	SYS_exit

	.align	0
	.global _C_LABEL(esigcode)
		_C_LABEL(esigcode):

	.data
	.global szsigcode
szsigcode:
	.long esigcode-sigcode
END(sigcode)
/* End of locore.S */