1178172Simp/*	$NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $	*/
2178172Simp
3178172Simp/*
4178172Simp * Copyright 2001 Wasabi Systems, Inc.
5178172Simp * All rights reserved.
6178172Simp *
7178172Simp * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8178172Simp *
9178172Simp * Redistribution and use in source and binary forms, with or without
10178172Simp * modification, are permitted provided that the following conditions
11178172Simp * are met:
12178172Simp * 1. Redistributions of source code must retain the above copyright
13178172Simp *    notice, this list of conditions and the following disclaimer.
14178172Simp * 2. Redistributions in binary form must reproduce the above copyright
15178172Simp *    notice, this list of conditions and the following disclaimer in the
16178172Simp *    documentation and/or other materials provided with the distribution.
17178172Simp * 3. All advertising materials mentioning features or use of this software
18178172Simp *    must display the following acknowledgement:
19178172Simp *	This product includes software developed for the NetBSD Project by
20178172Simp *	Wasabi Systems, Inc.
21178172Simp * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22178172Simp *    or promote products derived from this software without specific prior
23178172Simp *    written permission.
24178172Simp *
25178172Simp * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26178172Simp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27178172Simp * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28178172Simp * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29178172Simp * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30178172Simp * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31178172Simp * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32178172Simp * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33178172Simp * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34178172Simp * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35178172Simp * POSSIBILITY OF SUCH DAMAGE.
36178172Simp *
37178172Simp * $FreeBSD$
38178172Simp */
39178172Simp
40178172Simp/*
41178172Simp * Cache definitions/operations for R4000-style caches.
42178172Simp */
43178172Simp
/*
 * Cache selector (low 2 bits of the CACHE instruction's op field):
 * which cache the operation targets.
 */
#define	CACHE_R4K_I			0	/* primary instruction cache */
#define	CACHE_R4K_D			1	/* primary data cache */
#define	CACHE_R4K_SI			2	/* secondary instruction cache */
#define	CACHE_R4K_SD			3	/* secondary data cache */
48178172Simp
/*
 * Cache operation codes, pre-shifted into bits 2..4 of the CACHE
 * instruction's op field.  OR one of these with a CACHE_R4K_* cache
 * selector above to form the complete 5-bit op argument.  The trailing
 * comment on each line lists the caches that accept the operation.
 * Note some encodings are shared: op 0 is Index Invalidate for I/SI
 * but Index Writeback Invalidate for D/SD, and op 5 is Hit Writeback
 * Invalidate for D/SD but Fill for I.
 */
#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */
59178172Simp
60178172Simp#if !defined(LOCORE)
61178172Simp
/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single line.
 *
 *	'va' is the address of the line; 'op' must be a compile-time
 *	constant ("i" constraint) formed by OR'ing a CACHEOP_R4K_*
 *	operation with a CACHE_R4K_* cache selector.  Reordering is
 *	disabled around the CACHE instruction, and "memory" is
 *	clobbered so the compiler does not cache values across the op.
 */
#define	cache_op_r4k_line(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0(%0)				\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
77178172Simp
/*
 * cache_r4k_op_8lines_16:
 *
 *	Perform the specified cache operation on 8 16-byte cache lines,
 *	i.e. the 128 bytes starting at 'va' (offsets 0x00-0x70 in
 *	16-byte steps).  The eight CACHE instructions are issued
 *	back-to-back with reordering disabled.
 */
#define	cache_r4k_op_8lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x00(%0); cache %1, 0x10(%0)		\n\t"	\
		"cache %1, 0x20(%0); cache %1, 0x30(%0)		\n\t"	\
		"cache %1, 0x40(%0); cache %1, 0x50(%0)		\n\t"	\
		"cache %1, 0x60(%0); cache %1, 0x70(%0)		\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
96178172Simp
/*
 * cache_r4k_op_8lines_32:
 *
 *	Perform the specified cache operation on 8 32-byte cache lines,
 *	i.e. the 256 bytes starting at 'va' (offsets 0x00-0xe0 in
 *	32-byte steps).
 */
#define	cache_r4k_op_8lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x00(%0); cache %1, 0x20(%0)		\n\t"	\
		"cache %1, 0x40(%0); cache %1, 0x60(%0)		\n\t"	\
		"cache %1, 0x80(%0); cache %1, 0xa0(%0)		\n\t"	\
		"cache %1, 0xc0(%0); cache %1, 0xe0(%0)		\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
115178172Simp
/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines, i.e. the 512 bytes starting at 'va'
 *	(offsets 0x000-0x1f0 in 16-byte steps).
 */
#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x010(%0);	\n\t"	\
		"cache %1, 0x020(%0); cache %1, 0x030(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x050(%0);	\n\t"	\
		"cache %1, 0x060(%0); cache %1, 0x070(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x090(%0);	\n\t"	\
		"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\n\t"	\
		"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x110(%0);	\n\t"	\
		"cache %1, 0x120(%0); cache %1, 0x130(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x150(%0);	\n\t"	\
		"cache %1, 0x160(%0); cache %1, 0x170(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x190(%0);	\n\t"	\
		"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\n\t"	\
		"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
147178172Simp
/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines, i.e. the 1024 bytes starting at 'va'
 *	(offsets 0x000-0x3e0 in 32-byte steps).
 */
#define	cache_r4k_op_32lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x020(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x060(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x120(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x160(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\n\t"	\
		"cache %1, 0x200(%0); cache %1, 0x220(%0);	\n\t"	\
		"cache %1, 0x240(%0); cache %1, 0x260(%0);	\n\t"	\
		"cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\n\t"	\
		"cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\n\t"	\
		"cache %1, 0x300(%0); cache %1, 0x320(%0);	\n\t"	\
		"cache %1, 0x340(%0); cache %1, 0x360(%0);	\n\t"	\
		"cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\n\t"	\
		"cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
179178172Simp
/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines, i.e. the 4096 bytes starting at 'va'
 *	(offsets 0x0000-0x0f80 in 128-byte steps).  128-byte lines
 *	occur in some secondary caches.
 */
#define	cache_r4k_op_32lines_128(va, op)				\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x0000(%0); cache %1, 0x0080(%0);	\n\t"	\
		"cache %1, 0x0100(%0); cache %1, 0x0180(%0);	\n\t"	\
		"cache %1, 0x0200(%0); cache %1, 0x0280(%0);	\n\t"	\
		"cache %1, 0x0300(%0); cache %1, 0x0380(%0);	\n\t"	\
		"cache %1, 0x0400(%0); cache %1, 0x0480(%0);	\n\t"	\
		"cache %1, 0x0500(%0); cache %1, 0x0580(%0);	\n\t"	\
		"cache %1, 0x0600(%0); cache %1, 0x0680(%0);	\n\t"	\
		"cache %1, 0x0700(%0); cache %1, 0x0780(%0);	\n\t"	\
		"cache %1, 0x0800(%0); cache %1, 0x0880(%0);	\n\t"	\
		"cache %1, 0x0900(%0); cache %1, 0x0980(%0);	\n\t"	\
		"cache %1, 0x0a00(%0); cache %1, 0x0a80(%0);	\n\t"	\
		"cache %1, 0x0b00(%0); cache %1, 0x0b80(%0);	\n\t"	\
		"cache %1, 0x0c00(%0); cache %1, 0x0c80(%0);	\n\t"	\
		"cache %1, 0x0d00(%0); cache %1, 0x0d80(%0);	\n\t"	\
		"cache %1, 0x0e00(%0); cache %1, 0x0e80(%0);	\n\t"	\
		"cache %1, 0x0f00(%0); cache %1, 0x0f80(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
211178172Simp
/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 *	cache lines in each of 2 ways: offsets 0x000-0x0f0 in
 *	16-byte steps are applied relative to both 'va1' and 'va2',
 *	which address the corresponding line in the two ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x010(%0); cache %2, 0x010(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x030(%0); cache %2, 0x030(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x050(%0); cache %2, 0x050(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x070(%0); cache %2, 0x070(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x090(%0); cache %2, 0x090(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0b0(%0); cache %2, 0x0b0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0d0(%0); cache %2, 0x0d0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x0f0(%0); cache %2, 0x0f0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)
243178172Simp
/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 *	cache lines in each of 2 ways: offsets 0x000-0x1e0 in
 *	32-byte steps are applied relative to both 'va1' and 'va2',
 *	which address the corresponding line in the two ways.
 */
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x100(%0); cache %2, 0x100(%1);	\n\t"	\
		"cache %2, 0x120(%0); cache %2, 0x120(%1);	\n\t"	\
		"cache %2, 0x140(%0); cache %2, 0x140(%1);	\n\t"	\
		"cache %2, 0x160(%0); cache %2, 0x160(%1);	\n\t"	\
		"cache %2, 0x180(%0); cache %2, 0x180(%1);	\n\t"	\
		"cache %2, 0x1a0(%0); cache %2, 0x1a0(%1);	\n\t"	\
		"cache %2, 0x1c0(%0); cache %2, 0x1c0(%1);	\n\t"	\
		"cache %2, 0x1e0(%0); cache %2, 0x1e0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)
275178172Simp
/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 *	cache lines in each of 4 ways: offsets 0x000-0x070 in
 *	16-byte steps are applied relative to 'va1' through 'va4',
 *	which address the corresponding line in the four ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x010(%0); cache %4, 0x010(%1);	\n\t"	\
		"cache %4, 0x010(%2); cache %4, 0x010(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x030(%0); cache %4, 0x030(%1);	\n\t"	\
		"cache %4, 0x030(%2); cache %4, 0x030(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x050(%0); cache %4, 0x050(%1);	\n\t"	\
		"cache %4, 0x050(%2); cache %4, 0x050(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x070(%0); cache %4, 0x070(%1);	\n\t"	\
		"cache %4, 0x070(%2); cache %4, 0x070(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)
307178172Simp
/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 *	cache lines in each of 4 ways: offsets 0x000-0x0e0 in
 *	32-byte steps are applied relative to 'va1' through 'va4',
 *	which address the corresponding line in the four ways.
 */
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x080(%0); cache %4, 0x080(%1);	\n\t"	\
		"cache %4, 0x080(%2); cache %4, 0x080(%3);	\n\t"	\
		"cache %4, 0x0a0(%0); cache %4, 0x0a0(%1);	\n\t"	\
		"cache %4, 0x0a0(%2); cache %4, 0x0a0(%3);	\n\t"	\
		"cache %4, 0x0c0(%0); cache %4, 0x0c0(%1);	\n\t"	\
		"cache %4, 0x0c0(%2); cache %4, 0x0c0(%3);	\n\t"	\
		"cache %4, 0x0e0(%0); cache %4, 0x0e0(%1);	\n\t"	\
		"cache %4, 0x0e0(%2); cache %4, 0x0e0(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)
339178172Simp
/*
 * Prototypes for the R4000-style cache maintenance routines
 * (definitions live elsewhere).  Naming scheme:
 *
 *	icache / pdcache / sdcache - primary instruction, primary data,
 *	    or secondary data cache;
 *	sync / wbinv / inv / wb - sync (I-cache), write-back-invalidate,
 *	    invalidate, or write-back;
 *	_all / _range / _range_index - whole cache, address range, or
 *	    range by cache index;
 *	_16 / _32 / _128 / _generic - NOTE(review): the numeric suffix
 *	    appears to be the cache line size in bytes the routine
 *	    assumes (matching the per-line-size macros above), with
 *	    "generic" handling any line size - confirm against the
 *	    implementations.
 *
 * Range arguments are (vm_paddr_t start, vm_size_t length).
 */
void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(vm_paddr_t, vm_size_t);
void	r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t);

void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(vm_paddr_t, vm_size_t);
void	r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t);

void	r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t);

void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t);
382178172Simp
383178172Simp#endif /* !LOCORE */
384