/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 191375 2009-04-22 13:11:38Z raj $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/bootinfo.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at a 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to the temp mapping
 *  - map 16MB of RAM in TLB1[1]
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[1] mapping
 *  - invalidate the temp mapping
 *
 * locore register usage:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */
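
/*
 * Note: the detour through a temporary AS=1 mapping is taken because a TLB1
 * entry cannot safely be rewritten while instructions are still being
 * fetched through it.
 */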

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
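	/*
	 * The bl/mflr pair is the usual trick for discovering the current
	 * PC: LR receives the address of the instruction at label 1.
	 */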
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
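	/*
	 * 20 = 5 instructions * 4 bytes: SRR0 ends up pointing just past the
	 * rfi below, so we resume there with AS=1 translation active.
	 */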
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
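	/* Insert the entry number into the MAS0[ESEL] field (mask 0x000f0000) */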
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
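	/*
	 * Keep only the upper 8 address bits, i.e. round the current PC down
	 * to a 16MB boundary: per the loader assumptions above this is the
	 * kernel's physical load address.
	 */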
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
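	/* Mask off the low 12 bits of KERNBASE, keeping the page-aligned EPN */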
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
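	/*
	 * 36 = 9 instructions * 4 bytes: continue at the KERNBASE-relative
	 * address of the instruction following the rfi below.
	 */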
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
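	/* Point r1 near the top of the stack, leaving 8 bytes of headroom
	 * for the initial frame */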
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
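	/* Zero the frame back-chain word so stack unwinding terminates here */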
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r30.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31		/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
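	/* Avoid entry 1: the final kernel mapping is installed there later */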
	li	%r4, 1
	cmpw	%r4, %r29
	bne	1f
	addi	%r29, %r29, 1
1:	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidating all entries, skipping the one which
 * currently maps this code.
 *
 * r30		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3
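	/*
	 * IVPR supplies the common high-order bits of all vector addresses;
	 * each IVORx below supplies one handler's offset within that region.
	 */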

	/* Assign interrupt handler routine offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * intended for cases where the invalidation should NOT be propagated to
 * other CPUs.
 *
 * The global vars tlb0_ways and tlb0_entries_per_way are assumed to have
 * been set up correctly (by tlb0_get_tlbconf()).
 */
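/*
 * A hypothetical C caller might look like this (sketch only; the tid value
 * and its origin are illustrative, not taken from this file):
 *
 *	tlb0_get_tlbconf();	// populate tlb0_ways, tlb0_entries_per_way
 *	tid_flush(tid);		// evict all TLB0 entries tagged with tid
 */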
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
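	/* Poll until the hardware clears the self-clearing flash-invalidate bit */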
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below or the
 * (currently used) C version, which is optimized so that it uses no
 * non-volatile registers.
 */
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
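	/*
	 * CR, CTR and XER were staged in r10-r12 so the stmw below can save
	 * them together with the non-volatile r13-r31 in one store-multiple.
	 */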
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align 4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>