/*-
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 176771 2008-03-03 17:17:00Z raj $
 */

#include "assym.s"

#include <machine/param.h>
#include <machine/asm.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/bootinfo.h>

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.text
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions about the boot loader:
 *  - system memory starts from physical address 0
 *  - the kernel is loaded at a 16 MB boundary
 *  - it's mapped by a single TLB1 entry
 *  - the TLB1 mapping is 1:1 pa to va
 *  - all PID registers are set to the same value
 *
 * Loader register use:
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the AS and entry the kernel started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to the temp mapping
 *  - map 16 MB of RAM in TLB1[1]
 *  - use AS=0, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[1] mapping
 *  - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: unused
 *	r3	: kernel_text
 *	r4	: _end
 *	r5	: metadata pointer
 *	r6-r9	: unused
 *	r10	: entry we started in
 *	r11	: temp entry
 *	r12	: AS we started in
 *	r13-r31 : auxiliary registers
 */
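
/*
 * A note on the MAS register bit-twiddling used throughout this file
 * (an informal C sketch, not part of the original source): a TLB1 entry
 * is selected by
 *
 *	mas0 = MAS0_TLBSEL1 | (esel << 16);	// TLBSEL=1, ESEL=entry
 *
 * so "rlwimi rT, rS, 16, 12, 15" deposits the low 4 bits of rS into the
 * MAS0[ESEL] field (bits 12-15 in big-endian IBM numbering), and
 * "rlwinm rT, rS, 16, 28, 31" extracts that field again.
 */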

/*
 * Move metadata ptr to r5
 */
	mr	%r5, %r3

/*
 * Initial cleanup
 */
	li	%r16, 0x200		/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r16
	isync
#if 0
	mtspr	SPR_HID0, %r16
	isync
	msync
	mtspr	SPR_HID1, %r16
	isync
#endif

	/* Issue INV_ALL Invalidate on TLB0 */
	li	%r16, 0x04
	tlbivax	0, %r16
	isync
	msync

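	/*
	 * For tlbivax, the EA operand doubles as a command word: 0x04
	 * (loaded above) requests a flash invalidation of the whole
	 * selected array (INV_ALL), and 0x08 would select TLB1 instead
	 * of TLB0.
	 */
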
/*
 * Use tlbsx to locate the TLB1 entry that maps kernel code
 */
	bl	1f			/* Current address */
1:	mflr	%r15

	/* Find entry that maps current address */
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r15

	/* Copy entry number to r10 */
	mfspr	%r17, SPR_MAS0
	rlwinm	%r10, %r17, 16, 28, 31
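	/*
	 * The rlwinm above rotates MAS0 left by 16 and keeps bits 28-31,
	 * i.e. r10 = (MAS0 >> 16) & 0xf: the MAS0[ESEL] field that tlbsx
	 * filled in with the matching entry's number.
	 */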

	/* Invalidate TLB1, skipping our entry. */
	mfspr	%r17, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r17, %r17, TLBCFG_NENTRY_MASK@l
	li	%r16, 0			/* Start from Entry 0 */

2:	lis	%r15, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r15, %r16, 16, 12, 15
	mtspr	SPR_MAS0, %r15
	isync
	tlbre
	mfspr	%r15, SPR_MAS1
	cmpw	%r16, %r10
	beq	3f
	/* Clear VALID and IPROT bits for other entries */
	rlwinm	%r15, %r15, 0, 2, 31
	mtspr	SPR_MAS1, %r15
	isync
	tlbwe
	isync
	msync
3:	addi	%r16, %r16, 1
	cmpw	%r16, %r17		/* Check if this is the last entry */
	bne	2b

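/*
 * The loop above, as an informal C sketch (tlb1_read/tlb1_write are
 * hypothetical helpers, shown only to clarify the flow):
 *
 *	n = TLB1CFG & TLBCFG_NENTRY_MASK;
 *	for (i = 0; i < n; i++) {
 *		if (i == our_entry)
 *			continue;
 *		mas1 = tlb1_read(i);
 *		tlb1_write(i, mas1 & ~(MAS1_VALID | MAS1_IPROT));
 *	}
 *
 * Clearing VALID and IPROT (the two high-order MAS1 bits, hence the
 * "rlwinm 0, 2, 31" mask) leaves only the entry we are running on.
 */
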
/*
 * Create temporary mapping in the other Address Space
 */
	lis	%r17, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r17, %r10, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r17
	isync
	tlbre				/* Read it in */

	/* Prepare and write temp entry */
	lis	%r17, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r11, %r10, 0x1		/* Use next entry. */
	rlwimi	%r17, %r11, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r17
	isync

	mfspr	%r16, SPR_MAS1
	li	%r15, 1			/* AS 1 */
	rlwimi	%r16, %r15, 12, 19, 19	/* Set TS */
	li	%r17, 0
	rlwimi	%r16, %r17, 0, 8, 15	/* Global mapping, TID=0 */
	mtspr	SPR_MAS1, %r16
	isync

	tlbwe
	isync
	msync

	mfmsr	%r16
	ori	%r16, %r16, 0x30	/* Switch to AS 1. */

	bl	4f			/* Find current execution address */
4:	mflr	%r15
	addi	%r15, %r15, 20		/* Increment to instruction after rfi */
	mtspr	SPR_SRR0, %r15
	mtspr	SPR_SRR1, %r16
	rfi				/* Switch context */

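	/*
	 * Execution resumes here (label 4 plus 20 bytes, i.e. the
	 * instruction after the rfi), now fetching through the temporary
	 * AS1 entry: SRR1 carried MSR | 0x30, setting both IS and DS.
	 */
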
/*
 * Invalidate initial entry
 */
	mr	%r22, %r10
	bl	tlb1_inval_entry

/*
 * Set up the final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r16, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r17, 1			/* Entry 1 */
	rlwimi	%r16, %r17, 16, 12, 15
	mtspr	SPR_MAS0, %r16
	isync

	li	%r16, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r16, %r16, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r16
	isync

	lis	%r19, KERNBASE@h
	ori	%r19, %r19, KERNBASE@l
	mtspr	SPR_MAS2, %r19		/* Set final EPN, clear WIMG */
	isync

	bl	5f
5:	mflr	%r16			/* Use current address */
	lis	%r18, 0xff00		/* 16 MB alignment mask */
	and	%r16, %r16, %r18
	mr	%r25, %r16		/* Copy kernel load address */
	ori	%r16, %r16, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r16		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync
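
	/*
	 * The final entry written above, as a C-style sketch (names from
	 * <machine/tlb.h>; illustrative only, not compiled):
	 *
	 *	mas1 = MAS1_VALID | MAS1_IPROT |
	 *	    (TLB_SIZE_16M << MAS1_TSIZE_SHIFT);	// TS=0, TID=0
	 *	mas2 = KERNBASE;			// EPN, WIMG cleared
	 *	mas3 = (loadaddr & 0xff000000) |	// 16 MB-aligned RPN
	 *	    MAS3_SX | MAS3_SW | MAS3_SR;
	 */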

	/* Switch to the above TLB1[1] mapping */
	lis	%r18, 0x00ff		/* 16 MB offset mask */
	ori	%r18, %r18, 0xffff
	bl	6f
6:	mflr	%r20			/* Use current address */
	and	%r20, %r20, %r18	/* Offset from kernel load address */
	add	%r20, %r20, %r19	/* Move to kernel virtual address */
	addi	%r20, %r20, 32		/* Increment to instr. after rfi */
	li	%r21, 0x200
	mtspr	SPR_SRR0, %r20
	mtspr	SPR_SRR1, %r21
	rfi

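	/*
	 * A sketch of the jump above (hypothetical C, for clarity only):
	 *
	 *	offset = cur & 0x00ffffff;	// offset into the 16 MB image
	 *	SRR0 = KERNBASE + offset + 32;	// resume past the rfi
	 *	SRR1 = 0x200;			// IS/DS clear, DE kept
	 *
	 * so the same instruction stream continues, but now at its
	 * KERNBASE-relative virtual address through TLB1[1] in AS 0.
	 */
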
	/* Save kernel load address for later use */
	lis	%r24, kernload@ha
	addi	%r24, %r24, kernload@l
	stw	%r25, 0(%r24)

/*
 * Invalidate temp mapping
 */
	mr	%r22, %r11
	bl	tlb1_inval_entry

/*
 * Set up a temporary stack
 */
	lis	%r1, kstack0_space@ha
	addi	%r1, %r1, kstack0_space@l
	addi	%r1, %r1, (16384 - 512)

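	/*
	 * That is, r1 = kstack0_space + 16384 - 512: the top of the
	 * 16 kB region less 512 bytes of headroom for the initial
	 * call frame.
	 */
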
/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Jump to system initialization code
 *
 * Set up the first two arguments for e500_init; metadata (r5) is already in place.
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l

	bl	e500_init	/* Prepare e500 core */
	bl	mi_startup	/* Machine-independent part, does not return */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

tlb1_inval_entry:
	lis	%r17, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r17, %r22, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r17
	isync
	tlbre				/* Read it in */

	li	%r16, 0
	mtspr	SPR_MAS1, %r16
	isync
	tlbwe
	isync
	msync
	blr

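/*
 * On Book-E, each vector address is formed from IVPR (upper 16 bits)
 * and the corresponding IVORn (low 16 bits, 16-byte aligned); roughly:
 *
 *	vector(n) = (IVPR & 0xffff0000) | (IVORn & 0x0000fff0);
 *
 * hence ivor_setup loads IVPR with the @h half of interrupt_vector_base
 * and each IVOR with an @l offset from that base.
 */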
ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r21, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r21

	/* Assign the interrupt handler routine offsets */
	li	%r21, int_critical_input@l
	mtspr	SPR_IVOR0, %r21
	li	%r21, int_machine_check@l
	mtspr	SPR_IVOR1, %r21
	li	%r21, int_data_storage@l
	mtspr	SPR_IVOR2, %r21
	li	%r21, int_instr_storage@l
	mtspr	SPR_IVOR3, %r21
	li	%r21, int_external_input@l
	mtspr	SPR_IVOR4, %r21
	li	%r21, int_alignment@l
	mtspr	SPR_IVOR5, %r21
	li	%r21, int_program@l
	mtspr	SPR_IVOR6, %r21
	li	%r21, int_syscall@l
	mtspr	SPR_IVOR8, %r21
	li	%r21, int_decrementer@l
	mtspr	SPR_IVOR10, %r21
	li	%r21, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r21
	li	%r21, int_watchdog@l
	mtspr	SPR_IVOR12, %r21
	li	%r21, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r21
	li	%r21, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r21
	li	%r21, int_debug@l
	mtspr	SPR_IVOR15, %r21
	blr

/*
 * void tlb1_inval_va(vm_offset_t va)
 *
 * r3 - va to invalidate
 */
ENTRY(tlb1_inval_va)
	/* EA mask */
	lis	%r6, 0xffff
	ori	%r6, %r6, 0xf000
	and	%r3, %r3, %r6

	/* Select TLB1 */
	ori	%r3, %r3, 0x08

	isync
	tlbivax	0, %r3
	isync
	msync
	blr

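/*
 * For both invalidation routines, the tlbivax EA operand doubles as a
 * command word: the upper bits carry the page's EA, 0x08 selects TLB1
 * (clear selects TLB0), and 0x04 would mean INV_ALL.  Informally:
 *
 *	tlb1_inval_va(va) -> tlbivax(0, (va & 0xfffff000) | 0x08);
 *	tlb0_inval_va(va) -> tlbivax(0,  va & 0xfffff000);
 */
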
/*
 * void tlb0_inval_va(vm_offset_t va)
 *
 * r3 - va to invalidate
 */
ENTRY(tlb0_inval_va)
	/* EA mask, this also clears TLBSEL, selecting TLB0 */
	lis	%r6, 0xffff
	ori	%r6, %r6, 0xf000
	and	%r3, %r3, %r6

	isync
	tlbivax	0, %r3
	isync
	msync
	blr

/*
 * Cache disable/enable/invalidate sequences according
 * to section 2.16 of the E500CORE Reference Manual.
 */
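
/*
 * Each routine follows the same read-modify-write pattern: fetch L1CSRn,
 * set or clear the relevant bits, synchronize, then write it back.  A
 * C-style sketch of dcache_disable (illustrative only):
 *
 *	l1csr0 = mfspr(SPR_L1CSR0);
 *	l1csr0 &= ~L1CSR0_DCE;
 *	msync(); isync();		// drain accesses in flight
 *	mtspr(SPR_L1CSR0, l1csr0);
 *	isync();
 *
 * The i-cache variants omit the msync, which is only needed on the
 * data side.
 */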
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user
 * memory.  Any routine using this may only call bcopy, either the form
 * below or the (currently used) optimized C version, so that no
 * non-volatile registers are used.
 */
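
/*
 * Usage sketch (hypothetical caller; faultbuf is the buffer the stores
 * below fill in, as with setjmp):
 *
 *	faultbuf env;
 *
 *	if (setfault(env)) {
 *		curthread->td_pcb->pcb_onfault = NULL;
 *		return (EFAULT);	// a fault unwound us back here
 *	}
 *	// ... access user memory ...
 *	curthread->td_pcb->pcb_onfault = NULL;
 */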
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
GLOBAL(kstack0_space)
	.space	16384

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTSTK		16384		/* 16K interrupt stack */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align	4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>