/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 186229 2008-12-17 15:44:34Z raj $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/bootinfo.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to temp mapping
 *  - map 16MB of RAM in TLB1[1]
 *  - use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - switch to TLB1[1] mapping
 *  - invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
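/*
 * The bl/mflr pair is the standard position-independent idiom for reading
 * the current instruction address: bl deposits the address of the next
 * instruction (label 1, the mflr itself) into LR.
 */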
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

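/*
 * Switch context via rfi: SRR1 gets the current MSR with IS/DS set, SRR0
 * gets the address 20 bytes (five 4-byte instructions) past label 2, i.e.
 * the first instruction after the rfi below.  rfi updates MSR and PC
 * atomically, so from then on instruction and data accesses translate
 * through AS=1.
 */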
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
	rlwimi	%r3, %r4, 16, 12, 15	/* Insert into MAS0[ESEL] */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
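	/*
	 * We execute from the temporary AS=1 entry, which was copied from
	 * the boot loader's 1:1 mapping, so the current address is also the
	 * physical one; clearing the low 24 bits rounds it down to the 16MB
	 * boundary the kernel was loaded at (see the assumptions above).
	 */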
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
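	/*
	 * r4 keeps the low 24 bits of the current address (the offset into
	 * the 16MB image), r3 still holds KERNBASE from the MAS2 setup
	 * above; their sum plus 36 bytes (nine 4-byte instructions from
	 * label 4, up to and including the rfi) is the kernel virtual
	 * address of the first instruction after the rfi.
	 */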
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)

/*
 * Setup a temporary stack
 */
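/*
 * The stack grows downwards, so r1 is pointed near the top of the buffer;
 * the 8-byte bias keeps the initial frame inside the tmpstack region.
 */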
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init

	/* Switch to thread0.td_kstack now */
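	/*
	 * Per the comment above, e500_init is expected to return the new
	 * stack pointer (the top of thread0's kernel stack) in r3; the zero
	 * stored below terminates the frame back-chain so stack unwinding
	 * stops here.
	 */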
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
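/*
 * tlbivax takes its arguments encoded in the EA operand: the rlwinm below
 * shifts TLBSEL into the 0x18 field and the ori sets the invalidate-all
 * bit (0x4).
 */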
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3, returns the entry number in r30.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
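/*
 * tlbsx searches for a translation of the effective address in r3 using
 * the search PID programmed into MAS6; on a hit it loads MAS0-MAS3 from
 * the matching entry, which is where the ESEL extracted below comes from.
 */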
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31		/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
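	/* Skip entry 1: it is reserved for the final kernel mapping. */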
	li	%r4, 1
	cmpw	%r4, %r29
	bne	1f
	addi	%r29, %r29, 1
1:	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* Insert into MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* Insert into MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries, skipping the one which currently
 * maps this code.
 *
 * r30		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3

	/* Assign interrupt handler routines offsets */
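	/*
	 * On Book-E the vector address is formed from the upper 16 bits of
	 * IVPR and a 16-byte aligned offset taken from the IVOR, so each @l
	 * value below must lie within 64KB of interrupt_vector_base.
	 */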
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
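/*
 * In each routine below, the sync instructions preceding the mtspr make
 * sure pending storage accesses and instructions complete before the
 * cache control register changes, and the trailing isync prevents
 * subsequent instructions from executing before the change takes effect.
 */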
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below or the
 * (currently used) optimized C code, so that it doesn't use any non-volatile
 * registers.
 */
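/*
 * Fault buffer layout implied by the stores below: word 0 holds LR (the
 * resume address), word 1 the stack pointer, word 2 r2, and the stmw
 * starting at offset 12 lays down CR, CTR, XER and r13-r31.
 */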
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align 4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>
