locore.S revision 192532
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 192532 2009-05-21 11:43:37Z raj $
 */

#include "assym.s"

#include <sys/mutex.h>

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>
#include <machine/bootinfo.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - system memory starts from physical address 0
 *  - it's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - kernel is loaded at a 16MB boundary
 *  - all PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - find the TLB1 entry we started in
 *  - make sure it's protected, invalidate other entries
 *  - create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - switch to the temp mapping
 *  - map 16MB of RAM in TLB1[1]
 *  - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - switch to the TLB1[1] mapping
 *  - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */
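/*
 * As a worked example (illustrative values only, assuming KERNBASE is
 * 0xc0000000 and the kernel happens to be loaded at physical 0x01000000):
 * the final TLB1[1] entry built below would end up with EPN = 0xc0000000,
 * RPN = 0x01000000, a 16MB page size, TS = 0 and IPROT set.
 */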

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20		/* SRR0 = first instruction after the rfi below */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync
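	/*
	 * With the rlwimi above depositing the entry number into the ESEL
	 * field, MAS0 now holds TLBSEL = 1, ESEL = 1 (i.e. 0x10010000).
	 */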

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19	/* Keep EPN only (clear MAS2 flag bits) */
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36		/* SRR0 = first instruction after the rfi below */
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi
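/*
 * From here on we are running in AS=0 at virtual addresses relative to
 * KERNBASE, so the kernel's linked symbols can be referenced directly.
 */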

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)
#ifdef SMP
	/*
	 * APs need a separate copy of kernload info within the __boot_page
	 * area so they can access this value very early, before their TLBs
	 * are fully set up and the kernload global location is available.
	 */
	lis	%r3, kernload_ap@ha
	addi	%r3, %r3, kernload_ap@l
	stw	%r28, 0(%r3)
	msync
#endif

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)		/* Terminate the stack frame back-chain */

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

kernload_ap:
	.long	0

/*
 * Initial configuration
 */
1:
	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20		/* SRR0 = first instruction after the rfi below */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 1			/* Entry 1 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from kernload_ap */
	bl	4f
4:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0, 19	/* Base of the boot page in the current address space */
	lis	%r4, kernload_ap@h
	ori	%r4, %r4, kernload_ap@l
	lis	%r5, __boot_page@h
	ori	%r5, %r5, __boot_page@l
	sub	%r4, %r4, %r5	/* offset of kernload_ap within __boot_page */
	lwzx	%r3, %r4, %r3
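	/*
	 * Note: kernload_ap is read relative to the boot page's current
	 * mapping (r3 = boot page base as currently addressed), since the
	 * kernel's linked virtual addresses are not usable on this AP yet.
	 */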

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32		/* SRR0 = first instruction after the rfi below */
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r30
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31		/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
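	/* Skip entry 1; TLB1[1] is reserved for the final kernel mapping. */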
	li	%r4, 1
	cmpw	%r4, %r29
	bne	1f
	addi	%r29, %r29, 1
1:	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r30		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3
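	/*
	 * IVPR supplies the common high-order bits of the handler addresses;
	 * each IVORn below provides only the low-order offset of its handler.
	 */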

	/* Assign interrupt handler routines offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidation(s) should NOT be propagated to other
 * CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 *
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
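	/*
	 * The entry counter becomes the low-order page-number bits of
	 * MAS2[EPN], which index the TLB0 set that tlbre will read.
	 */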
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp, to set up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below, or the
 * (currently used) optimized C code, so that it doesn't use any non-volatile
 * registers.
 */
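/*
 * Conceptually (a sketch, not the full mechanism): the fault buffer address
 * passed in r3 is recorded in pcb_onfault; if a fault is taken while
 * accessing user memory, the trap code uses it to unwind back to setfault()'s
 * caller, which then sees a non-zero return value, much as longjmp returns
 * to setjmp.
 */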
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256		/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align 4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>
