1/*
2 * arch/ppc/boot/common/util.S
3 *
4 * Useful bootup functions, which are more easily done in asm than C.
5 *
6 * NOTE:  Be very very careful about the registers you use here.
7 *	We don't follow any ABI calling convention among the
8 *	assembler functions that call each other, especially early
9 *	in the initialization.  Please preserve at least r3 and r4
10 *	for these early functions, as they often contain information
11 *	passed from boot roms into the C decompress function.
12 *
13 * Author: Tom Rini
14 *	   trini@mvista.com
15 * Derived from arch/ppc/boot/prep/head.S (Cort Dougan, many others).
16 *
17 * Copyright 2001 MontaVista Software Inc.
18 *
19 * This program is free software; you can redistribute  it and/or modify it
20 * under  the terms of  the GNU General  Public License as published by the
21 * Free Software Foundation;  either version 2 of the  License, or (at your
22 * option) any later version.
23 */
24
25#include <asm/processor.h>
26#include <asm/cache.h>
27#include <asm/ppc_asm.h>
28
29
30	.text
31
	.globl	disable_6xx_mmu
disable_6xx_mmu:
	/* Establish default MSR value, exception prefix 0xFFF.
	 * If necessary, this function must fix up the LR if we
	 * return to a different address space once the MMU is
	 * disabled.
	 */
	li	r8,MSR_IP|MSR_FP	/* IP: vectors at 0xFFFnnnnn; FP on.
					 * IR/DR not set => instruction and
					 * data translation are both disabled. */
	mtmsr	r8

	/* Clear BATs: zero the upper word of every D/I BAT pair so no
	 * block translations remain valid once the MMU is re-enabled. */
	li	r8,0
	mtspr	DBAT0U,r8
	mtspr	DBAT0L,r8
	mtspr	DBAT1U,r8
	mtspr	DBAT1L,r8
	mtspr	DBAT2U,r8
	mtspr	DBAT2L,r8
	mtspr	DBAT3U,r8
	mtspr	DBAT3L,r8
	mtspr	IBAT0U,r8
	mtspr	IBAT0L,r8
	mtspr	IBAT1U,r8
	mtspr	IBAT1L,r8
	mtspr	IBAT2U,r8
	mtspr	IBAT2L,r8
	mtspr	IBAT3U,r8
	mtspr	IBAT3L,r8
	isync				/* discard prefetched instructions */
	sync				/* make the BAT updates globally visible */
	sync

	/* Set segment registers: program all 16 SRs for context 0 with
	 * ascending VSIDs, user-key (Ku) set. */
	li	r8,16		/* load up segment register values */
	mtctr	r8		/* for context 0 */
	lis	r8,0x2000	/* Ku = 1, VSID = 0 */
	li	r10,0		/* r10 = effective address of segment 0 */
3:	mtsrin	r8,r10
	addi	r8,r8,0x111	/* increment VSID */
	addis	r10,r10,0x1000	/* address of next segment (256MB step) */
	bdnz	3b
	/* No blr here: control falls through into disable_6xx_l1cache,
	 * whose final blr returns to our caller. */
73
	.globl	disable_6xx_l1cache
disable_6xx_l1cache:
	/* Enable, invalidate and then disable the L1 icache/dcache.
	 * r8  = ICE|DCE|ICFI|DCI (enable + flash-invalidate bits)
	 * r11 = current HID0 with those bits OR'd in
	 * r10 = current HID0 with ALL of those bits cleared
	 * Writing r11's bits then r10 leaves both caches invalidated
	 * and disabled, preserving the other HID0 settings. */
	li	r8,0
	ori	r8,r8,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
	mfspr	r11,HID0
	or	r11,r11,r8
	andc	r10,r11,r8
	isync
	mtspr	HID0,r8			/* enable + invalidate both caches */
	sync
	isync
	mtspr	HID0,r10		/* now disable them (invalidate bits clear) */
	sync
	isync
	blr
90
	.globl	_setup_L2CR
_setup_L2CR:
/*
 * We should be skipping this section on CPUs where this results in an
 * illegal instruction.  If not, please send trini@kernel.crashing.org
 * the PVR of your CPU.
 */
	/* Invalidate/disable L2 cache */
	sync
	isync
	mfspr	r8,L2CR
	rlwinm	r8,r8,0,1,31		/* clear bit 0 (L2E) -- disable L2 */
	oris	r8,r8,0x0020		/* set bit 10 (L2I) -- global invalidate */
	sync
	isync
	mtspr	L2CR,r8
	sync
	isync

	/* Wait for the invalidation to complete: poll bit 31 (L2IP,
	 * invalidate-in-progress) until it reads back clear. */
1:	mfspr	r8,L2CR
	rlwinm.	r9,r8,0,31,31
	bne	1b

	rlwinm	r8,r8,0,11,9		/* Turn off L2I bit */
	sync
	isync
	mtspr	L2CR,r8
	sync
	isync
	blr				/* L2 left disabled and invalidated */
122
123
/*
 * Delay for a number of microseconds
 * -- Use the BUS timer (assumes 66MHz)
 * In:  r3 = microseconds to delay
 * Clobbers r0, r4-r9, ctr, cr0 (601 path: r0, r4, ctr, cr0).
 */
	.globl	udelay
udelay:
	mfspr	r4,PVR
	srwi	r4,r4,16	/* r4 = processor version */
	cmpi	0,r4,1		/* 601 ? */
	bne	.udelay_not_601
	/* 601 has no timebase: burn cycles in a counted NOP loop. */
00:	li	r0,86	/* Instructions / microsecond? */
	mtctr	r0
10:	addi	r0,r0,0 /* NOP */
	bdnz	10b
	subic.	r3,r3,1
	bne	00b
	blr

.udelay_not_601:
	/* Timebase ticks every 60ns (66MHz bus / 4).  Convert the
	 * microsecond count to ticks, rounding up. */
	mulli	r4,r3,1000	/* nanoseconds */
	addi	r4,r4,59	/* round up before dividing */
	li	r5,60
	divw	r4,r4,r5	/* BUS ticks */
	/* Read a consistent 64-bit timebase: re-read the upper half and
	 * retry if it changed while reading the lower half. */
1:	mftbu	r5
	mftb	r6
	mftbu	r7
	cmp	0,r5,r7
	bne	1b		/* Get [synced] base time */
	addc	r9,r6,r4	/* Compute end time (r8:r9, 64-bit add) */
	addze	r8,r5
	/* Spin until current timebase >= end time.
	 * NOTE(review): these are signed compares; they would misbehave if
	 * the timebase crossed 0x80000000 mid-delay -- presumably never an
	 * issue this early in boot, but cmpl would be strictly safer. */
2:	mftbu	r5
	cmp	0,r5,r8
	blt	2b		/* upper word still below end: keep waiting */
	bgt	3f		/* upper word past end: done */
	mftb	r6
	cmp	0,r6,r9		/* upper equal: compare lower words */
	blt	2b
3:	blr
162
/* Write r3 into the MSR (callable from C: _put_MSR(value)). */
.globl _put_MSR
_put_MSR:
	mtmsr	r3
	blr
167
168	.section ".relocate_code","xa"
/*
 * Flush and enable instruction cache
 * First, flush the data cache in case it was enabled and may be
 * holding instructions for copy back.
 * Clobbers r3-r6 (saves/restores LR around the flush_data_cache call).
 */
_GLOBAL(flush_instruction_cache)
	mflr	r6			/* save LR; bl below clobbers it */
	bl	flush_data_cache

#ifdef CONFIG_8xx
	lis	r3, IDC_INVALL@h	/* invalidate all of the icache */
	mtspr	IC_CST, r3
	lis	r3, IDC_ENABLE@h	/* enable the icache */
	mtspr	IC_CST, r3
	lis	r3, IDC_DISABLE@h	/* leave the dcache disabled */
	mtspr	DC_CST, r3
#elif defined(CONFIG_4xx)		/* use defined(): CONFIG_4xx may be
					 * defined without a value */
	lis	r3,start@h		# r3 = &_start (64KB-aligned base)
	lis	r4,_etext@ha
	addi	r4,r4,_etext@l		# r4 = &_etext
1:	dcbf	r0,r3			# Flush the data cache
	icbi	r0,r3			# Invalidate the instruction cache
	addi	r3,r3,0x10		# Increment by one cache line
	cmplwi	cr0,r3,r4		# Are we at the end yet?
	blt	1b			# No, keep flushing and invalidating
#else
	/* Enable, invalidate and then disable the L1 icache/dcache. */
	li	r3,0
	ori	r3,r3,(HID0_ICE|HID0_DCE|HID0_ICFI|HID0_DCI)
	mfspr	r4,HID0			/* r4 = original HID0 */
	or	r5,r4,r3
	isync
	mtspr	HID0,r5			/* enable + invalidate both caches */
	sync
	isync
	ori	r5,r4,HID0_ICE	/* Enable cache */
	mtspr	HID0,r5			/* icache on, dcache off, invalidates clear */
	sync
	isync
#endif
	mtlr	r6			/* restore caller's LR */
	blr
211
#define NUM_CACHE_LINES 128*8
#define cache_flush_buffer 0x1000

/*
 * Flush data cache
 * Do this by just reading lots of stuff into the cache:  touching
 * NUM_CACHE_LINES lines starting at cache_flush_buffer displaces
 * (copy-back flushes) whatever the dcache currently holds.
 * Clobbers r3, r4, ctr.
 */
_GLOBAL(flush_data_cache)
	lis	r3,cache_flush_buffer@h
	ori	r3,r3,cache_flush_buffer@l
	li	r4,NUM_CACHE_LINES
	mtctr	r4
00:	lwz	r4,0(r3)		/* load forces eviction of an old line */
	addi	r3,r3,L1_CACHE_BYTES	/* Next line, please */
	bdnz	00b
10:	blr				/* (10: label unused, kept as-is) */
228
229	.previous
230
231