#ifndef _MIPS_R4KCACHE_H
#define _MIPS_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
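
/*
 * Inline cache maintenance primitives for R4000-class CPUs, built on
 * the MIPS CACHE instruction.  The cache geometry used here (the
 * icache_size/dcache_size/scache_size totals, the ic/dc/sc_lsize line
 * sizes and the way counts in mips_cpu) is probed and set up by the
 * mm initialisation code.
 *
 * Index-type CACHE operations select a line purely by its cache
 * index, so a set-associative cache has to be stepped through once
 * per way; hit-type operations take a virtual address and act only
 * if that address is actually resident in the cache.
 */
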
static inline void flush_icache_line_indexed(unsigned long addr)
{
	unsigned long waystep = icache_size / mips_cpu.icache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.icache.ways; way++) {
		__asm__ __volatile__(
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache %1, (%0)\n\t"
			".set mips0\n\t"
			".set reorder"
			:
			: "r" (addr),
			  "i" (Index_Invalidate_I));

		addr += waystep;
	}
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	unsigned long waystep = dcache_size / mips_cpu.dcache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.dcache.ways; way++) {
		__asm__ __volatile__(
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache %1, (%0)\n\t"
			".set mips0\n\t"
			".set reorder"
			:
			: "r" (addr),
			  "i" (Index_Writeback_Inv_D));

		addr += waystep;
	}
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	unsigned long waystep = scache_size / mips_cpu.scache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.scache.ways; way++) {
		__asm__ __volatile__(
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache %1, (%0)\n\t"
			".set mips0\n\t"
			".set reorder"
			:
			: "r" (addr),
			  "i" (Index_Writeback_Inv_SD));

		addr += waystep;
	}
}

static inline void flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}

static inline void flush_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_D));
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_D));
}

static inline void invalidate_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_SD));
}

static inline void flush_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_SD));
}

/*
 * The next two are for badland addresses like signal trampolines:
 * the target may not be mapped at all, so each CACHE op carries an
 * __ex_table fixup entry that simply resumes execution after the
 * instruction if it takes a TLB fault, instead of oopsing.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1, (%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b, 2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1, (%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b, 2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_D));
}
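
/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * header): pushing a signal trampoline out to memory and out of the
 * icache one line at a time.  'tramp' and 'tramp_size' are assumed
 * names here, and a real caller would have to honour the icache line
 * size separately if it differs from dc_lsize:
 *
 *	unsigned long p = tramp & ~(dc_lsize - 1);
 *
 *	while (p < tramp + tramp_size) {
 *		protected_writeback_dcache_line(p);
 *		protected_flush_icache_line(p);
 *		p += dc_lsize;
 *	}
 */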
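/*
 * Emit a single CACHE instruction for 'base'; the blast_* loops below
 * use this to step through an entire cache or one page.  E.g.
 * cache_unroll(start, Hit_Writeback_Inv_D) writes back and invalidates
 * the dcache line containing 'start'.
 */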
#define cache_unroll(base, op)				\
	__asm__ __volatile__(				\
		".set noreorder\n\t"			\
		".set mips3\n\t"			\
		"cache %1, (%0)\n\t"			\
		".set mips0\n\t"			\
		".set reorder"				\
		:					\
		: "r" (base),				\
		  "i" (op))
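/*
 * The blast_* routines walk an address range one cache line at a
 * time.  The *_page_indexed variants repeat the walk once per way,
 * offset by waystep = cache_size / ways; a 16kB two-way dcache, for
 * example, gives an 8kB waystep.
 */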
static inline void blast_dcache(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + dcache_size;

	while (start < end) {
		cache_unroll(start, Index_Writeback_Inv_D);
		start += dc_lsize;
	}
}

static inline void blast_dcache_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache_unroll(start, Hit_Writeback_Inv_D);
		start += dc_lsize;
	}
}

static inline void blast_dcache_page_indexed(unsigned long page)
{
	unsigned long start, end;
	unsigned long waystep = dcache_size / mips_cpu.dcache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.dcache.ways; way++) {
		/*
		 * Cover one page worth of indices in each way; with a
		 * fixed end of page + PAGE_SIZE the loop would never
		 * run for way > 0 whenever waystep >= PAGE_SIZE.
		 */
		start = page + way * waystep;
		end = start + PAGE_SIZE;
		while (start < end) {
			cache_unroll(start, Index_Writeback_Inv_D);
			start += dc_lsize;
		}
	}
}

static inline void blast_icache(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + icache_size;

	while (start < end) {
		cache_unroll(start, Index_Invalidate_I);
		start += ic_lsize;
	}
}

static inline void blast_icache_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache_unroll(start, Hit_Invalidate_I);
		start += ic_lsize;
	}
}

static inline void blast_icache_page_indexed(unsigned long page)
{
	unsigned long start, end;
	unsigned long waystep = icache_size / mips_cpu.icache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.icache.ways; way++) {
		/* Cover one page worth of indices in each way. */
		start = page + way * waystep;
		end = start + PAGE_SIZE;
		while (start < end) {
			cache_unroll(start, Index_Invalidate_I);
			start += ic_lsize;
		}
	}
}

static inline void blast_scache(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + scache_size;

	while (start < end) {
		cache_unroll(start, Index_Writeback_Inv_SD);
		start += sc_lsize;
	}
}

static inline void blast_scache_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache_unroll(start, Hit_Writeback_Inv_SD);
		start += sc_lsize;
	}
}

static inline void blast_scache_page_indexed(unsigned long page)
{
	unsigned long start, end;
	unsigned long waystep = scache_size / mips_cpu.scache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.scache.ways; way++) {
		/* Cover one page worth of indices in each way. */
		start = page + way * waystep;
		end = start + PAGE_SIZE;
		while (start < end) {
			cache_unroll(start, Index_Writeback_Inv_SD);
			start += sc_lsize;
		}
	}
}
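/*
 * Fill loads the line containing 'addr' from memory into the primary
 * icache.
 */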
static inline void fill_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Fill));
}

#endif /* !(_MIPS_R4KCACHE_H) */