#ifndef _MIPS_MIPS64_CACHE_H
#define _MIPS_MIPS64_CACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
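
/*
 * Inline assembler primitives for the MIPS64 CACHE instruction:
 * single-line hit- and index-type operations on the primary caches
 * and the secondary cache, plus "blast" loops that sweep a whole
 * cache or a page one line at a time.  The line sizes (ic_lsize,
 * dc_lsize, sc_lsize) and the cache geometry (icache_size,
 * dcache_size, scache_size, mips_cpu.*.ways) are assumed to be set
 * up elsewhere by the CPU probe code.
 */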

/*
 * Index-type operations select a cache line by its position in the
 * cache rather than by address match, so the same index has to be
 * touched in every way of a set-associative cache.  Stepping the
 * address by (cache size / number of ways) reaches the same index
 * in the next way.
 */
static inline void flush_icache_line_indexed(unsigned long addr)
{
	unsigned long waystep = icache_size / mips_cpu.icache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.icache.ways; way++) {
		__asm__ __volatile__(
			".set noreorder\n\t"
			"cache %1, (%0)\n\t"
			".set reorder"
			:
			: "r" (addr),
			  "i" (Index_Invalidate_I));

		addr += waystep;
	}
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	unsigned long waystep = dcache_size / mips_cpu.dcache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.dcache.ways; way++) {
		__asm__ __volatile__(
			".set noreorder\n\t"
			"cache %1, (%0)\n\t"
			".set reorder"
			:
			: "r" (addr),
			  "i" (Index_Writeback_Inv_D));

		addr += waystep;
	}
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	unsigned long waystep = scache_size / mips_cpu.scache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.scache.ways; way++) {
		__asm__ __volatile__(
			".set noreorder\n\t"
			"cache %1, (%0)\n\t"
			".set reorder"
			:
			: "r" (addr),
			  "i" (Index_Writeback_Inv_SD));

		addr += waystep;
	}
}
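
/*
 * A minimal usage sketch (hypothetical caller): to evict whatever
 * happens to sit at a given dcache index, pass any address carrying
 * those index bits; the loop above covers the remaining ways.
 *
 *	flush_dcache_line_indexed(KSEG0 + index * dc_lsize);
 */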

/*
 * Hit-type operations only act if the given address actually hits in
 * the cache, so a single op per line address suffices and no way loop
 * is needed.
 */
static inline void flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}

static inline void flush_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_D));
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_D));
}

static inline void invalidate_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_SD));
}

static inline void flush_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1, (%0)\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_SD));
}

/*
 * The next two are for badland addresses like signal trampolines.
 * The cache op may fault on such an address, so each carries an
 * __ex_table entry that lets the exception handler resume execution
 * at label 2.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		".dword\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		".dword\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr), "i" (Hit_Writeback_D));
}
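
/*
 * A minimal usage sketch (hypothetical caller): after writing a
 * signal trampoline to the user stack, push the stores out of the
 * dcache and invalidate the stale icache line, tolerating a fault on
 * the user address:
 *
 *	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 *	protected_flush_icache_line(addr & ~(ic_lsize - 1));
 */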

/*
 * Despite its name, cache_unroll issues a single cache op; the
 * blast_* loops below do the stepping one line at a time.
 */
#define cache_unroll(base,op)			\
	__asm__ __volatile__("			\
		.set noreorder;			\
		cache %1, (%0);			\
		.set reorder"			\
		:				\
		: "r" (base),			\
		  "i" (op))

static inline void blast_dcache(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + dcache_size;

	while (start < end) {
		cache_unroll(start, Index_Writeback_Inv_D);
		start += dc_lsize;
	}
}

static inline void blast_dcache_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache_unroll(start, Hit_Writeback_Inv_D);
		start += dc_lsize;
	}
}

static inline void blast_dcache_page_indexed(unsigned long page)
{
	unsigned long waystep = dcache_size / mips_cpu.dcache.ways;
	unsigned long start, end;
	unsigned int way;

	/*
	 * Hit the page's index range in every way; the end address
	 * must advance together with the way offset, otherwise only
	 * way 0 would ever be touched.
	 */
	for (way = 0; way < mips_cpu.dcache.ways; way++) {
		start = page + way * waystep;
		end = start + PAGE_SIZE;
		while (start < end) {
			cache_unroll(start, Index_Writeback_Inv_D);
			start += dc_lsize;
		}
	}
}

static inline void blast_icache(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + icache_size;

	while (start < end) {
		cache_unroll(start, Index_Invalidate_I);
		start += ic_lsize;
	}
}

static inline void blast_icache_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache_unroll(start, Hit_Invalidate_I);
		start += ic_lsize;
	}
}

static inline void blast_icache_page_indexed(unsigned long page)
{
	unsigned long waystep = icache_size / mips_cpu.icache.ways;
	unsigned long start, end;
	unsigned int way;

	for (way = 0; way < mips_cpu.icache.ways; way++) {
		start = page + way * waystep;
		end = start + PAGE_SIZE;
		while (start < end) {
			cache_unroll(start, Index_Invalidate_I);
			start += ic_lsize;
		}
	}
}

static inline void blast_scache(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while (start < end) {
		cache_unroll(start, Index_Writeback_Inv_SD);
		start += sc_lsize;
	}
}

static inline void blast_scache_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while (start < end) {
		cache_unroll(start, Hit_Writeback_Inv_SD);
		start += sc_lsize;
	}
}

static inline void blast_scache_page_indexed(unsigned long page)
{
	unsigned long waystep = scache_size / mips_cpu.scache.ways;
	unsigned long start, end;
	unsigned int way;

	for (way = 0; way < mips_cpu.scache.ways; way++) {
		start = page + way * waystep;
		end = start + PAGE_SIZE;
		while (start < end) {
			cache_unroll(start, Index_Writeback_Inv_SD);
			start += sc_lsize;
		}
	}
}
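
/*
 * A usage sketch (hypothetical caller): the hit-type page blasts are
 * only safe when the page is mapped in the current context, e.g. in
 * a flush_cache_page() implementation; for another context's pages
 * the indexed variants work on any address with matching index bits:
 *
 *	if (mm == current->active_mm)
 *		blast_dcache_page(page);
 *	else
 *		blast_dcache_page_indexed(page);
 */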

#endif /* _MIPS_MIPS64_CACHE_H */