1
2#ifndef _MIPS_R4KCACHE_H
3#define _MIPS_R4KCACHE_H
4
5#include <asm/asm.h>
6#include <asm/cacheops.h>
7
8#include <typedefs.h>
9#include <sbconfig.h>
10#include <bcm4710.h>
11#include <asm/paccess.h>
/*
 * BCM4710 cache-hazard workarounds:
 *
 * BCM4710_DUMMY_RREG() performs a dummy uncached (KSEG1) read of the
 * SDRAM core's sbimstate backplane register; presumably this settles a
 * pending memory transaction before a dcache operation is issued.
 * NOTE(review): the exact hardware hazard is not documented here --
 * confirm against the Broadcom BCM4710 errata.
 *
 * BCM4710_FILL_TLB(addr) touches `addr' with a plain load so the TLB
 * entry for the page is resident before Hit-type cache ops are issued.
 *
 * BCM4710_PROTECTED_FILL_TLB(addr) does the same via get_dbe(), so a
 * bus error on a bad address is caught by the exception table instead
 * of faulting (for "badland" addresses, cf. the protected_* helpers).
 */
#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(BCM4710_REG_SDRAM + SBCONFIGOFF)))->sbimstate)
#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
15
/*
 * Invalidate, by index, the icache line selected by `addr' -- in every
 * way.  Index ops select the way from high address bits, so the loop
 * re-issues the op with `addr' advanced by one way size
 * (icache_size / ways) per iteration.
 */
static inline void flush_icache_line_indexed(unsigned long addr)
{
	unsigned long waystep = icache_size/mips_cpu.icache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.icache.ways; way++)
	{
		__asm__ __volatile__(
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache %1, (%0)\n\t"
			".set mips0\n\t"
			".set reorder"
			:
			: "r" (addr),
			"i" (Index_Invalidate_I));

		/* step into the same index of the next way */
		addr += waystep;
	}
}
36
/*
 * Writeback+invalidate, by index, the dcache line selected by `addr'
 * in every way (addr advanced by one way size per iteration).
 * A BCM4710 dummy register read precedes each cache op -- see the
 * BCM4710_DUMMY_RREG() comment/workaround above.
 */
static inline void flush_dcache_line_indexed(unsigned long addr)
{
	unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
	unsigned int way;

	for (way = 0; way < mips_cpu.dcache.ways; way++)
	{
		BCM4710_DUMMY_RREG();
		__asm__ __volatile__(
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache %1, (%0)\n\t"
			".set mips0\n\t"
			".set reorder"
			:
			: "r" (addr),
			"i" (Index_Writeback_Inv_D));

		/* step into the same index of the next way */
		addr += waystep;
	}
}
58
/*
 * Hit-invalidate the single icache line containing `addr'.
 * A Hit op matches by physical tag, so it is a no-op if the address
 * is not currently cached.
 */
static inline void flush_icache_line(unsigned long addr)
{

	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}
72
/*
 * Hit-writeback-invalidate the single dcache line containing `addr'.
 * The BCM4710 dummy read works around the chip's dcache-op hazard
 * (see BCM4710_DUMMY_RREG above).
 */
static inline void flush_dcache_line(unsigned long addr)
{
	BCM4710_DUMMY_RREG();
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_D));
}
86
/*
 * Hit-invalidate (WITHOUT writeback) the dcache line containing
 * `addr'.  Any dirty data in the line is discarded.
 * NOTE(review): unlike the writeback variants, no BCM4710_DUMMY_RREG()
 * is issued here -- presumably the hazard only affects writeback ops;
 * confirm against the BCM4710 errata.
 */
static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_D));
}
99
100/*
101 * The next two are for badland addresses like signal trampolines.
102 */
/*
 * Hit-invalidate one icache line at a possibly-unmapped user address
 * (e.g. a signal trampoline).  The __ex_table entry maps a fault at
 * label 1 (the cache op) to label 2 (the next instruction), so a bad
 * address is silently skipped instead of oopsing.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1,(%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}
118
/*
 * Hit-writeback (no invalidate) one dcache line at a possibly-unmapped
 * address.  Same __ex_table fixup scheme as
 * protected_flush_icache_line(): a faulting cache op at label 1 resumes
 * at label 2.  The BCM4710 dummy read precedes the op as usual.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	BCM4710_DUMMY_RREG();
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1,(%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_D));
}
135
/*
 * Issue a single cache instruction: operation `op' (a constant from
 * <asm/cacheops.h>) on the line addressed by `base'.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the old definition carried a trailing semicolon, so
 * "cache_unroll(a,b);" expanded to "stmt;;" and the macro was unsafe
 * in an unbraced if/else body.
 */
#define cache_unroll(base,op)			\
do {						\
	__asm__ __volatile__("			\
		.set noreorder;			\
		.set mips3;			\
		cache %1, (%0);			\
		.set mips0;			\
		.set reorder"			\
		:				\
		: "r" (base),			\
		  "i" (op));			\
} while (0)
146
147
148static inline void blast_dcache(void)
149{
150	unsigned long start = KSEG0;
151	unsigned long end = (start + dcache_size);
152
153	while(start < end) {
154		BCM4710_DUMMY_RREG();
155		cache_unroll(start,Index_Writeback_Inv_D);
156		start += dc_lsize;
157	}
158}
159
160static inline void blast_dcache_page(unsigned long page)
161{
162	unsigned long start = page;
163	unsigned long end = (start + PAGE_SIZE);
164
165	BCM4710_FILL_TLB(start);
166	while(start < end) {
167		BCM4710_DUMMY_RREG();
168		cache_unroll(start,Hit_Writeback_Inv_D);
169		start += dc_lsize;
170	}
171}
172
173static inline void blast_dcache_page_indexed(unsigned long page)
174{
175	unsigned long start;
176	unsigned long end = (page + PAGE_SIZE);
177	unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
178	unsigned int way;
179
180	for (way = 0; way < mips_cpu.dcache.ways; way++) {
181		start = page + way*waystep;
182		while(start < end) {
183			BCM4710_DUMMY_RREG();
184			cache_unroll(start,Index_Writeback_Inv_D);
185			start += dc_lsize;
186		}
187	}
188}
189
190static inline void blast_icache(void)
191{
192	unsigned long start = KSEG0;
193	unsigned long end = (start + icache_size);
194
195	while(start < end) {
196		cache_unroll(start,Index_Invalidate_I);
197		start += ic_lsize;
198	}
199}
200
201static inline void blast_icache_page(unsigned long page)
202{
203	unsigned long start = page;
204	unsigned long end = (start + PAGE_SIZE);
205
206	BCM4710_FILL_TLB(start);
207	while(start < end) {
208		cache_unroll(start,Hit_Invalidate_I);
209		start += ic_lsize;
210	}
211}
212
213static inline void blast_icache_page_indexed(unsigned long page)
214{
215	unsigned long start;
216	unsigned long end = (page + PAGE_SIZE);
217	unsigned long waystep = icache_size/mips_cpu.icache.ways;
218	unsigned int way;
219
220	for (way = 0; way < mips_cpu.icache.ways; way++) {
221		start = page + way*waystep;
222		while(start < end) {
223			cache_unroll(start,Index_Invalidate_I);
224			start += ic_lsize;
225		}
226	}
227}
228
229#endif /* !(_MIPS_R4KCACHE_H) */
230