/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 215052 2010-11-09 20:41:10Z jhb $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at a 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to the temp mapping
 *  - map 16MB of RAM in TLB1[1]
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[1] mapping
 *  - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
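	/* Increment past the "rfi" below: 5 instructions * 4 bytes */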
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
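	/* Insert the entry number into the MAS0[ESEL] field */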
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
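	/* Clear the low 12 bits (MAS2 flags), leaving the KERNBASE page address */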
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
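	/* Point past the "rfi" below: 9 instructions * 4 bytes from label 4 */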
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)
#ifdef SMP
	/*
	 * APs need a separate copy of kernload info within the __boot_page
	 * area so they can access this value very early, before their TLBs
	 * are fully set up and the kernload global location is available.
	 */
	lis	%r3, kernload_ap@ha
	addi	%r3, %r3, kernload_ap@l
	stw	%r28, 0(%r3)
	msync
#endif

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
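	/* Write a NULL back-chain word to terminate the stack */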
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

kernload_ap:
	.long	0

/*
 * Initial configuration
 */
1:
	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
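	/* Increment past the "rfi" below: 5 instructions * 4 bytes */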
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
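	/* Insert the entry number into the MAS0[ESEL] field */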
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from kernload_ap */
	bl	4f
4:	mflr	%r3
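	/* Clear the low 12 bits: physical base of the 4K-aligned boot page */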
	rlwinm	%r3, %r3, 0, 0, 19
	lis	%r4, kernload_ap@h
	ori	%r4, %r4, kernload_ap@l
	lis	%r5, __boot_page@h
	ori	%r5, %r5, __boot_page@l
	sub	%r4, %r4, %r5	/* offset of kernload_ap within __boot_page */
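	/* Load kernload_ap via its physical address: boot page base + offset */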
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
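	/* Point past the "rfi" below: 8 instructions * 4 bytes from label 5 */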
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * Expects the address to look up in r3; returns the entry number in r30.
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31		/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1.
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
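	/* Skip TLB1[1]: it is reserved for the final kernel mapping */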
	li	%r4, 1
	cmpw	%r4, %r29
	bne	1f
	addi	%r29, %r29, 1
1:	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
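	/* Set MAS1[TS] = 1: the entry translates in AS=1 */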
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidating all entries and skipping the one which
 * currently maps this code.
 *
 * r30		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we pad out the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3

	/* Assign interrupt handler routines offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID.  Note this is
 * dedicated to cases when invalidations should NOT be propagated to other
 * CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
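	/* Extract MAS1[TID] into r9 */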
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below or the
 * (currently used) optimized C code, so that no non-volatile registers are
 * used.
 */
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align 4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>