/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 224551 2011-07-31 18:26:47Z marcel $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at a 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create temp entry in the second AS (make sure it doesn't collide with
 *    TLB1[0], where the final mapping will go)
 *  - switch to temp mapping
 *  - map 16MB of RAM in TLB1[0]
 *  - use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - switch to the TLB1[0] mapping
 *  - invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r26	: scratch registers
 *	r27	: kernload
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
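/*
 * The "bl 1f; 1: mflr" pair below is the usual position-independent
 * idiom for discovering the current PC: bl deposits the address of the
 * instruction at label 1 into LR, and mflr copies it into r3 as the
 * lookup address for tlb1_find_current.
 */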
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
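/*
 * rfi reloads MSR from SRR1 and resumes at SRR0.  SRR1 gets the current
 * MSR with IS/DS set, and SRR0 is set 20 bytes (five instructions) past
 * label 2, i.e. to the instruction right after rfi, so execution simply
 * continues there, now translated through the AS=1 temp entry.
 */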
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
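	/*
	 * A tlbwe takes its operands from the MAS registers: MAS0 selects
	 * the TLB and entry (ESEL), MAS1 holds the valid/protect/TSIZE/TS
	 * bits, MAS2 the effective page number and WIMGE attributes, and
	 * MAS3 the real page number plus permissions.  The rlwimi below
	 * inserts the entry number from r4 into the ESEL field of MAS0.
	 */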
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r27, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[0] mapping */
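	/*
	 * Compute the virtual return address: r4 becomes the current offset
	 * from the 16MB-aligned load address, r3 is stripped of the MAS2
	 * flag bits (leaving KERNBASE), and their sum is the KERNBASE-
	 * relative equivalent of label 4.  Adding 36 (nine instructions)
	 * points SRR0 just past rfi, so we come back out of the rfi at the
	 * same spot, but running at virtual addresses in AS=0.
	 */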
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r27, 0(%r3)
#ifdef SMP
	/*
	 * APs need a separate copy of kernload info within the __boot_page
	 * area so they can access this value very early, before their TLBs
	 * are fully set up and the kernload global location is available.
	 */
	lis	%r3, kernload_ap@ha
	addi	%r3, %r3, kernload_ap@l
	stw	%r27, 0(%r3)
	msync
#endif

/*
 * Setup a temporary stack
 */
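	/*
	 * r1 is set 8 bytes below the top of tmpstack so the initial
	 * frame's back-chain word still lands inside the buffer.
	 */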
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare e500 core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
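/*
 * .align 12 places __boot_page on a 4096-byte boundary: the AP boot
 * code, including the kernload_ap word, must fit within a single 4KB
 * page (see __boot_page_padding at the end of this block).
 */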
__boot_page:
	bl	1f

kernload_ap:
	.long	0

/*
 * Initial configuration
 */
1:
	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from kernload_ap */
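	/*
	 * kernload_ap is read PC-relative: the current PC masked to its
	 * 4KB page base yields the start of __boot_page as seen through
	 * the temp mapping, and adding kernload_ap's link-time offset
	 * within the page gives an address lwzx can load it from.
	 */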
	bl	4f
4:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0, 19
	lis	%r4, kernload_ap@h
	ori	%r4, %r4, kernload_ap@l
	lis	%r5, __boot_page@h
	ori	%r5, %r5, __boot_page@l
	sub	%r4, %r4, %r5	/* offset of kernload_ap within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
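	/*
	 * Same return-past-rfi trick as on the BSP: the offset from the
	 * boot page start plus __boot_page's linked address (still in r5)
	 * gives the virtual equivalent of label 5, and adding 32 (eight
	 * instructions) points SRR0 at the instruction following rfi.
	 */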
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
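	/*
	 * tlbivax takes a fabricated "effective address" whose low bits
	 * encode the operation: TLBSEL is rotated into the 0x18 bit
	 * positions and 0x4 (INV_ALL) requests invalidation of every
	 * entry in the selected TLB.
	 */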
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 20, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre
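	/*
	 * tlbre fills MAS1-MAS3 from the selected entry, so the temp entry
	 * written below inherits the same EPN/RPN translation; only TS
	 * (address space), TID and the valid/protect bits are adjusted.
	 */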

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff CPU fetches the last word of the boot
	 * page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3
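	/*
	 * IVPR provides the upper 16 bits of each vector address and the
	 * IVORn registers the lower 16, so every handler below must reside
	 * in the same 64KB-aligned region as interrupt_vector_base.
	 */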

	/* Assign interrupt handler routines offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 *
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
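	/*
	 * The flash-invalidate bit clears itself when the operation
	 * completes; poll until hardware has done so.
	 */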
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user
 * memory. Any routine using this may only call bcopy, either the assembly
 * form or the (currently used) optimized C code, neither of which uses
 * any non-volatile registers.
 */
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(sintrnames)
	.long	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2

	.align 4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(sintrcnt)
	.long	INTRCNT_COUNT * 4 * 2

#include <powerpc/booke/trap_subr.S>