#ifndef _ASM_R10KCACHE_H
#define _ASM_R10KCACHE_H

#include <asm/addrspace.h>	/* KSEG0 */
#include <asm/asm.h>
#include <asm/mipsregs.h>	/* read_c0_config() */
#include <asm/page.h>		/* PAGE_SIZE */
#include <asm/r10kcacheops.h>

/* These are fixed for the current R10000.  */
#define icache_size	0x8000
#define dcache_size	0x8000
#define icache_way_size	0x4000
#define dcache_way_size	0x4000
#define ic_lsize	64
#define dc_lsize	32
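
/*
 * That is: 32kB two-way set-associative primary caches, so each way covers
 * 16kB (0x4000).  I-cache lines are 64 bytes and D-cache lines 32 bytes,
 * giving 256 resp. 512 lines per way.
 */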

/* These are configuration dependent.  */
#define scache_size()	({						\
	unsigned long __res;						\
	__res = (read_c0_config() >> 16) & 3;				\
	__res = 1 << (__res + 19);					\
	__res;								\
})

#define sc_lsize()	({						\
	unsigned long __res;						\
	__res = (read_c0_config() >> 13) & 1;				\
	__res = 1 << (__res + 6);					\
	__res;								\
})
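
/*
 * Example: a Config value with bits [17:16] = 2 yields an S-cache size of
 * 1 << (2 + 19) = 2MB; bit 13 set selects 1 << (1 + 6) = 128-byte S-cache
 * lines, bit 13 clear selects 64-byte lines.
 */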

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Index_Invalidate_I));
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Index_Writeback_Inv_D));
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Index_Writeback_Inv_S));
}

static inline void flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void flush_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Writeback_Inv_D));
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Invalidate_D));
}

static inline void invalidate_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Invalidate_S));
}

static inline void flush_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr), "i" (Hit_Writeback_Inv_S));
}
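
/*
 * The Hit_* operations above touch a line only if the given address
 * actually hits in the respective cache; the Index_* variants further up
 * select a line purely by its cache index and ignore the tag.
 */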

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		".dword\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		".dword\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr), "i" (Hit_Writeback_Inv_D));
}
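
/*
 * The 1:/2: labels and the __ex_table entry let the exception handler fix
 * up an address error or TLB fault taken on the cache op: execution
 * resumes at 2: instead of oopsing on a bad user address.
 */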

#define cache32_unroll16(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x220(%0);	\
		cache %1, 0x240(%0); cache %1, 0x260(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x320(%0);	\
		cache %1, 0x340(%0); cache %1, 0x360(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
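
/*
 * cache32_unroll16 touches 16 D-cache lines of 32 bytes (0x200 bytes per
 * call) and cache32_unroll32 touches 32 lines (0x400 bytes), which is what
 * the loop strides below are derived from.  The indexed loops also run the
 * same unroll at (address ^ 1): the R10000's index cache ops take the way
 * from bit 0 of the virtual address, so this covers the second way of the
 * two-way caches.
 */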

static inline void blast_dcache32(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = (way0 + dcache_way_size);

	while (way0 < end) {
		cache32_unroll16(way0, Index_Writeback_Inv_D);
		cache32_unroll16(way1, Index_Writeback_Inv_D);
		way0 += 0x200;
		way1 += 0x200;
	}
}

static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start, Hit_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long way0 = page;
	unsigned long way1 = page ^ 1;
	unsigned long end = page + PAGE_SIZE;

	while (way0 < end) {
		cache32_unroll16(way0, Index_Writeback_Inv_D);
		cache32_unroll16(way1, Index_Writeback_Inv_D);
		way0 += 0x200;
		way1 += 0x200;
	}
}

#define cache64_unroll16(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		cache %1, 0x400(%0); cache %1, 0x440(%0);	\
		cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\
		cache %1, 0x500(%0); cache %1, 0x540(%0);	\
		cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\
		cache %1, 0x600(%0); cache %1, 0x640(%0);	\
		cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\
		cache %1, 0x700(%0); cache %1, 0x740(%0);	\
		cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
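
/*
 * Same arithmetic for the 64-byte line caches: cache64_unroll16 covers
 * 0x400 bytes per call and cache64_unroll32 covers 0x800 bytes, matching
 * the 0x400 and 0x800 strides used in the blast_icache64* and
 * blast_scache64* loops below.
 */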

static inline void blast_icache64(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = way0 + icache_way_size;

	while (way0 < end) {
		cache64_unroll16(way0,Index_Invalidate_I);
		cache64_unroll16(way1,Index_Invalidate_I);
		way0 += 0x400;
		way1 += 0x400;
	}
}

static inline void blast_icache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start,Hit_Invalidate_I);
		start += 0x800;
	}
}

static inline void blast_icache64_page_indexed(unsigned long page)
{
	unsigned long way0 = page;
	unsigned long way1 = page ^ 1;
	unsigned long end = page + PAGE_SIZE;

	while (way0 < end) {
		cache64_unroll16(way0,Index_Invalidate_I);
		cache64_unroll16(way1,Index_Invalidate_I);
		way0 += 0x400;
		way1 += 0x400;
	}
}

static inline void blast_scache64(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = KSEG0 + scache_size();

	while (way0 < end) {
		cache64_unroll16(way0,Index_Writeback_Inv_S);
		cache64_unroll16(way1,Index_Writeback_Inv_S);
		way0 += 0x400;
		way1 += 0x400;
	}
}

static inline void blast_scache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start,Hit_Writeback_Inv_S);
		start += 0x800;
	}
}

static inline void blast_scache64_page_indexed(unsigned long page)
{
	unsigned long way0 = page;
	unsigned long way1 = page ^ 1;
	unsigned long end = page + PAGE_SIZE;

	while (way0 < end) {
		cache64_unroll16(way0,Index_Writeback_Inv_S);
		cache64_unroll16(way1,Index_Writeback_Inv_S);
		way0 += 0x400;
		way1 += 0x400;
	}
}

#define cache128_unroll16(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		cache %1, 0x800(%0); cache %1, 0x880(%0);	\
		cache %1, 0x900(%0); cache %1, 0x980(%0);	\
		cache %1, 0xa00(%0); cache %1, 0xa80(%0);	\
		cache %1, 0xb00(%0); cache %1, 0xb80(%0);	\
		cache %1, 0xc00(%0); cache %1, 0xc80(%0);	\
		cache %1, 0xd00(%0); cache %1, 0xd80(%0);	\
		cache %1, 0xe00(%0); cache %1, 0xe80(%0);	\
		cache %1, 0xf00(%0); cache %1, 0xf80(%0);	\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
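
/*
 * With 128-byte S-cache lines, cache128_unroll16 covers 0x800 bytes per
 * call and cache128_unroll32 covers 0x1000 bytes, i.e. a whole page when
 * PAGE_SIZE is 4kB.  That is why blast_scache128_page below gets away
 * without a loop; the _indexed variant simply repeats the unroll at
 * (page ^ 1) to cover the other way.
 */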

static inline void blast_scache128(void)
{
	unsigned long way0 = KSEG0;
	unsigned long way1 = way0 ^ 1;
	unsigned long end = way0 + scache_size();

	while (way0 < end) {
		cache128_unroll16(way0, Index_Writeback_Inv_S);
		cache128_unroll16(way1, Index_Writeback_Inv_S);
		way0 += 0x800;
		way1 += 0x800;
	}
}

static inline void blast_scache128_page(unsigned long page)
{
	cache128_unroll32(page, Hit_Writeback_Inv_S);
}

static inline void blast_scache128_page_indexed(unsigned long page)
{
	cache128_unroll32(page    , Index_Writeback_Inv_S);
	cache128_unroll32(page ^ 1, Index_Writeback_Inv_S);
}

#endif /* _ASM_R10KCACHE_H */