1
2#ifndef _MIPS_R4KCACHE_H
3#define _MIPS_R4KCACHE_H
4
5#include <asm/asm.h>
6#include <asm/cacheops.h>
7
8#include <typedefs.h>
9#include <sbconfig.h>
10#include <asm/paccess.h>
11
/*
 * BCM4710 workaround helpers.
 *
 * BCM4710_DUMMY_RREG() performs an uncached (KSEG1) read of the Silicon
 * Backplane sbimstate config register.  It is issued immediately before
 * dcache operations throughout this file.
 * NOTE(review): presumably this dummy read works around a BCM4710 core
 * erratum with back-to-back cache ops — confirm against Broadcom errata.
 */
#define BCM4710_DUMMY_RREG() (((sbconfig_t *)(KSEG1ADDR(SB_ENUM_BASE + SBCONFIGOFF)))->sbimstate)

/* Touch 'addr' so its mapping is resident in the TLB before cache ops. */
#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
/* Same, but via get_dbe() so a bus error is caught instead of faulting. */
#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
16
17static inline void flush_icache_line_indexed(unsigned long addr)
18{
19	unsigned long waystep = icache_size/mips_cpu.icache.ways;
20	unsigned int way;
21
22	for (way = 0; way < mips_cpu.icache.ways; way++)
23	{
24		__asm__ __volatile__(
25			".set noreorder\n\t"
26			".set mips3\n\t"
27			"cache %1, (%0)\n\t"
28			".set mips0\n\t"
29			".set reorder"
30			:
31			: "r" (addr),
32			"i" (Index_Invalidate_I));
33
34		addr += waystep;
35	}
36}
37
38static inline void flush_dcache_line_indexed(unsigned long addr)
39{
40	unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
41	unsigned int way;
42
43	for (way = 0; way < mips_cpu.dcache.ways; way++)
44	{
45		BCM4710_DUMMY_RREG();
46		__asm__ __volatile__(
47			".set noreorder\n\t"
48			".set mips3\n\t"
49			"cache %1, (%0)\n\t"
50			".set mips0\n\t"
51			".set reorder"
52			:
53			: "r" (addr),
54			"i" (Index_Writeback_Inv_D));
55
56		addr += waystep;
57	}
58}
59
/* Hit-invalidate the single icache line containing 'addr'. */
static inline void flush_icache_line(unsigned long addr)
{

	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}
73
/*
 * Hit-writeback-invalidate the single dcache line containing 'addr'.
 * The dummy backplane read is the BCM4710 workaround applied before
 * every dcache op in this file.
 */
static inline void flush_dcache_line(unsigned long addr)
{
	BCM4710_DUMMY_RREG();
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_Inv_D));
}
87
/*
 * Hit-invalidate (no writeback) the single dcache line containing
 * 'addr'.  Any dirty data in the line is discarded.
 */
static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n\t"
		"cache %1, (%0)\n\t"
		".set mips0\n\t"
		".set reorder"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_D));
}
100
101/*
102 * The next two are for badland addresses like signal trampolines.
103 */
/*
 * Hit-invalidate the icache line containing 'addr', tolerating a bad
 * address: the __ex_table entry maps a fault at label 1 to a resume at
 * label 2, so a bus/TLB error on the cache op is silently skipped.
 * Used for badland addresses like signal trampolines (see above).
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1,(%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Invalidate_I));
}
119
/*
 * Hit-writeback (without invalidate) the dcache line containing 'addr',
 * tolerating a bad address via the same __ex_table fixup scheme as
 * protected_flush_icache_line().  The dummy backplane read is the
 * BCM4710 pre-dcache-op workaround.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	BCM4710_DUMMY_RREG();
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %1,(%0)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "r" (addr),
		  "i" (Hit_Writeback_D));
}
136
/*
 * Emit one MIPS "cache" instruction: perform cache operation 'op' on the
 * line selected by address 'base'.  'op' must be a compile-time constant
 * cache opcode from <asm/cacheops.h> (it is used as an "i" constraint).
 */
#define cache_unroll(base,op)	        	\
	__asm__ __volatile__("	         	\
		.set noreorder;		        \
		.set mips3;		        \
                cache %1, (%0);	                \
		.set mips0;			\
		.set reorder"			\
		:				\
		: "r" (base),			\
		  "i" (op));
147
148
149static inline void blast_dcache(void)
150{
151	unsigned long start = KSEG0;
152	unsigned long end = (start + dcache_size);
153
154	while(start < end) {
155		BCM4710_DUMMY_RREG();
156		cache_unroll(start,Index_Writeback_Inv_D);
157		start += dc_lsize;
158	}
159}
160
161static inline void blast_dcache_page(unsigned long page)
162{
163	unsigned long start = page;
164	unsigned long end = (start + PAGE_SIZE);
165
166	BCM4710_FILL_TLB(start);
167	while(start < end) {
168		BCM4710_DUMMY_RREG();
169		cache_unroll(start,Hit_Writeback_Inv_D);
170		start += dc_lsize;
171	}
172}
173
174static inline void blast_dcache_page_indexed(unsigned long page)
175{
176	unsigned long start;
177	unsigned long end = (page + PAGE_SIZE);
178	unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
179	unsigned int way;
180
181	for (way = 0; way < mips_cpu.dcache.ways; way++) {
182		start = page + way*waystep;
183		while(start < end) {
184			BCM4710_DUMMY_RREG();
185			cache_unroll(start,Index_Writeback_Inv_D);
186			start += dc_lsize;
187		}
188	}
189}
190
191static inline void blast_icache(void)
192{
193	unsigned long start = KSEG0;
194	unsigned long end = (start + icache_size);
195
196	while(start < end) {
197		cache_unroll(start,Index_Invalidate_I);
198		start += ic_lsize;
199	}
200}
201
202static inline void blast_icache_page(unsigned long page)
203{
204	unsigned long start = page;
205	unsigned long end = (start + PAGE_SIZE);
206
207	BCM4710_FILL_TLB(start);
208	while(start < end) {
209		cache_unroll(start,Hit_Invalidate_I);
210		start += ic_lsize;
211	}
212}
213
214static inline void blast_icache_page_indexed(unsigned long page)
215{
216	unsigned long start;
217	unsigned long end = (page + PAGE_SIZE);
218	unsigned long waystep = icache_size/mips_cpu.icache.ways;
219	unsigned int way;
220
221	for (way = 0; way < mips_cpu.icache.ways; way++) {
222		start = page + way*waystep;
223		while(start < end) {
224			cache_unroll(start,Index_Invalidate_I);
225			start += ic_lsize;
226		}
227	}
228}
229
230#endif /* !(_MIPS_R4KCACHE_H) */
231