/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/powerpc/booke/locore.S 330446 2018-03-05 06:59:30Z eadler $
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in the second AS (make sure it's not TLB1[1])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 *  - Switch to the TLB1[1] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31
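	/*
	 * The PVR version field (now in the low half of r3) selects which
	 * HID0 defaults to apply; the plain e500 values below are the
	 * fallback for cores that are neither e500mc nor e5500.
	 */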

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc and e5500 do not have the HID1 register, so skip HID1 setup
 * on those cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
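/*
 * The bl/mflr pair below is the usual position-independent idiom: the bl
 * deposits the runtime address of label 1 in LR, giving us our current PC.
 */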
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

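/*
 * Switch translation to AS=1 via rfi: SRR1 supplies the new MSR (with
 * IS/DS set) and SRR0 the resume address.  The "+ 20" skips the five
 * instructions (5 * 4 bytes) from the mflr through the rfi, so execution
 * resumes at the instruction after the rfi, fetched through AS=1.
 */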
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
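	/*
	 * r3 still holds the KERNBASE EPN written to MAS2; mask off its low
	 * bits, add our current offset from the load address, and rfi to the
	 * same spot at its kernel virtual address.  The "+ 36" accounts for
	 * the nine instructions (9 * 4 bytes) from the mflr through the rfi.
	 */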
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr   SPR_SRR0, %r4
	mtspr   SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Setup a temporary stack
 */
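/*
 * The .long below holds the link-time offset of tmpstack from the word's
 * own address; bl/mflr yields that word's runtime address, so adding the
 * two produces tmpstack's runtime address regardless of where we were
 * loaded.  The stack pointer is then set near the top of the region.
 */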
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
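/*
 * Compute the runtime addresses of _DYNAMIC and the GOT from the
 * PC-relative words below.  The GOT entry holds the link-time address of
 * _DYNAMIC, so subtracting it from the runtime address yields the
 * relocation base that elf_reloc_self() needs.
 */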
	bl      1f
	.long   _DYNAMIC-.
	.long   _GLOBAL_OFFSET_TABLE_-.
1:	mflr    %r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)	/* Terminate the stack frame back-chain */

	/* Machine-independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
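	/*
	 * .align 12 places __boot_page on a 4 KB boundary; all AP startup
	 * code and data must fit in this single page.  The bl below skips
	 * over the bp_trace/bp_kernload data words and, as a side effect,
	 * leaves the address of bp_trace in LR.
	 */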
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

	.globl	bp_kernload
bp_kernload:
	.long	0
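	/*
	 * bp_trace is scratch space the AP updates for early diagnostics;
	 * bp_kernload is filled in with the kernel's physical load address
	 * by the BSP before the APs are released from hold-off.
	 */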

/*
 * Initial configuration
 */
1:
	mflr    %r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */

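	/* Same AS=1 trampoline sequence as in the BSP startup path above. */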
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
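	/*
	 * The two words below hold the link-time addresses of bp_kernload
	 * and __boot_page; their difference is bp_kernload's offset within
	 * the boot page.  Masking the current PC down to a 4 KB boundary
	 * gives the boot page's base, so the lwzx fetches the value the BSP
	 * stored in bp_kernload.
	 */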
	bl	4f
	.long	bp_kernload
	.long	__boot_page
4:	mflr	%r3
	lwz	%r4, 0(%r3)
	lwz	%r5, 4(%r3)
	rlwinm	%r3, %r3, 0, 0, 19
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this a virtual address */
	addi	%r3, %r3, 32		/* Resume after the rfi (8 * 4 bytes) */
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
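	/*
	 * tlbivax with the INVALL bit set invalidates every entry in the
	 * selected TLB array; the tlbsync/msync pair makes the invalidation
	 * visible system-wide before returning.
	 */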
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is that we are now running in AS=0, but we
 * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
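	/*
	 * tlbsx searches the TLBs for the EA in r3, qualified by the SPID
	 * value placed in MAS6; on a hit it loads MAS0/MAS1 with the
	 * matching entry, from which MAS0[ESEL] is extracted below.
	 */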
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Create a temporary AS=1 mapping next to the current entry.
 *
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* Set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15	/* Set MAS1[TID] */
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
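	/* Save LR around the zero_mas7/zero_mas8 helper calls. */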
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loop over TLB1 and invalidate all entries, skipping the one which
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
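/*
 * MAS7 (the upper RPN bits) is not implemented on the e500v1, and MAS8
 * exists only on the e500mc and e5500, so each helper checks the PVR
 * before touching the register.
 */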
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
	/*
	 * The __boot_tlb1 table is used to hold BSP TLB1 entries
	 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
	 * The BSP fills in the table in tlb_ap_prep(); the AP then loads
	 * its contents into the TLB1 hardware in pmap_bootstrap_ap().
	 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we pad out the remainder.
	 * Upon release from hold-off, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/invalidate sequences, following section 2.16 of
 * the E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b			/* Wait for hardware to clear DCFI */
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b			/* Wait for hardware to clear ICFI */
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache invalidate/enable sequences for the E500mc (no disable routine
 * is provided here).
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr   %r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
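	/*
	 * Flash-invalidate the branch buffer (BUCSR[BBFI]) first, then set
	 * BUCSR[BPEN] to enable branch prediction.
	 */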
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

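/*
 * Erratum workaround: perform a single store entirely from code locked
 * into the I-cache.  Two cache lines are locked, the code waits roughly
 * a million timebase ticks, stores r4 to 0(r3), waits again, and then
 * unlocks the lines; the nop padding ensures no instruction is fetched
 * from outside the locked lines around the store.
 */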
ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into the I-cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	lis	%r8, 2f@h
	ori	%r8, %r8, 2f@l
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/************************************************************************/
/* Data section								*/
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	.long   begin
GLOBAL(__endkernel)
	.long   end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>