/*	$NetBSD: cache_r4k.h,v 1.10 2003/03/08 04:43:26 rafal Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */

#define	CACHE_R4K_I			0
#define	CACHE_R4K_D			1
#define	CACHE_R4K_SI			2
#define	CACHE_R4K_SD			3

#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */
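
/*
 * The cache instruction's 5-bit op field selects the target cache in
 * bits [1:0] and the operation in bits [4:2], which is why the
 * CACHEOP_R4K_* values above are pre-shifted left by 2.  A complete
 * operand is formed by OR-ing one value from each group; for example:
 *
 *	CACHE_R4K_D | CACHEOP_R4K_HIT_WB_INV		(== 0x15)
 *
 * writes back and invalidates the primary data cache line, if any,
 * that hits the given virtual address.
 */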

#if !defined(LOCORE)

/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single line.
 */
#define	cache_op_r4k_line(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0(%0)				\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
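
/*
 * Example (a sketch only, not part of this header): write back and
 * invalidate the primary data cache lines covering [va, va + size),
 * assuming a 16-byte line size and a line-aligned starting address.
 *
 *	static __inline void
 *	dcache_wbinv_range_sketch(vm_offset_t va, vm_size_t size)
 *	{
 *		vm_offset_t eva = va + size;
 *
 *		while (va < eva) {
 *			cache_op_r4k_line(va,
 *			    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *			va += 16;
 *		}
 *	}
 */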

/*
 * cache_r4k_op_8lines_16:
 *
 *	Perform the specified cache operation on 8 16-byte cache lines.
 */
#define	cache_r4k_op_8lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x00(%0); cache %1, 0x10(%0)		\n\t"	\
		"cache %1, 0x20(%0); cache %1, 0x30(%0)		\n\t"	\
		"cache %1, 0x40(%0); cache %1, 0x50(%0)		\n\t"	\
		"cache %1, 0x60(%0); cache %1, 0x70(%0)		\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
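
/*
 * The unrolled variants here trade code size for loop overhead: one
 * invocation covers a fixed-size block, so a caller loops once per
 * block instead of once per line.  A sketch of driving the 8-line,
 * 16-byte variant (8 * 16 = 0x80 bytes per iteration; the loop
 * bounds are illustrative):
 *
 *	while (va < eva) {
 *		cache_r4k_op_8lines_16(va,
 *		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *		va += 0x80;
 *	}
 */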

/*
 * cache_r4k_op_8lines_32:
 *
 *	Perform the specified cache operation on 8 32-byte cache lines.
 */
#define	cache_r4k_op_8lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x00(%0); cache %1, 0x20(%0)		\n\t"	\
		"cache %1, 0x40(%0); cache %1, 0x60(%0)		\n\t"	\
		"cache %1, 0x80(%0); cache %1, 0xa0(%0)		\n\t"	\
		"cache %1, 0xc0(%0); cache %1, 0xe0(%0)		\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_64:
 *
 *	Perform the specified cache operation on 8 64-byte cache lines.
 */
#define	cache_r4k_op_8lines_64(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x040(%0)	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x140(%0)	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x010(%0);	\n\t"	\
		"cache %1, 0x020(%0); cache %1, 0x030(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x050(%0);	\n\t"	\
		"cache %1, 0x060(%0); cache %1, 0x070(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x090(%0);	\n\t"	\
		"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\n\t"	\
		"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x110(%0);	\n\t"	\
		"cache %1, 0x120(%0); cache %1, 0x130(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x150(%0);	\n\t"	\
		"cache %1, 0x160(%0); cache %1, 0x170(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x190(%0);	\n\t"	\
		"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\n\t"	\
		"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x020(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x060(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x120(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x160(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\n\t"	\
		"cache %1, 0x200(%0); cache %1, 0x220(%0);	\n\t"	\
		"cache %1, 0x240(%0); cache %1, 0x260(%0);	\n\t"	\
		"cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\n\t"	\
		"cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\n\t"	\
		"cache %1, 0x300(%0); cache %1, 0x320(%0);	\n\t"	\
		"cache %1, 0x340(%0); cache %1, 0x360(%0);	\n\t"	\
		"cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\n\t"	\
		"cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_64:
 *
 *	Perform the specified cache operation on 32 64-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_64(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x040(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x140(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\n\t"	\
		"cache %1, 0x200(%0); cache %1, 0x240(%0);	\n\t"	\
		"cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\n\t"	\
		"cache %1, 0x300(%0); cache %1, 0x340(%0);	\n\t"	\
		"cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\n\t"	\
		"cache %1, 0x400(%0); cache %1, 0x440(%0);	\n\t"	\
		"cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\n\t"	\
		"cache %1, 0x500(%0); cache %1, 0x540(%0);	\n\t"	\
		"cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\n\t"	\
		"cache %1, 0x600(%0); cache %1, 0x640(%0);	\n\t"	\
		"cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\n\t"	\
		"cache %1, 0x700(%0); cache %1, 0x740(%0);	\n\t"	\
		"cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_128(va, op)				\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x0000(%0); cache %1, 0x0080(%0);	\n\t"	\
		"cache %1, 0x0100(%0); cache %1, 0x0180(%0);	\n\t"	\
		"cache %1, 0x0200(%0); cache %1, 0x0280(%0);	\n\t"	\
		"cache %1, 0x0300(%0); cache %1, 0x0380(%0);	\n\t"	\
		"cache %1, 0x0400(%0); cache %1, 0x0480(%0);	\n\t"	\
		"cache %1, 0x0500(%0); cache %1, 0x0580(%0);	\n\t"	\
		"cache %1, 0x0600(%0); cache %1, 0x0680(%0);	\n\t"	\
		"cache %1, 0x0700(%0); cache %1, 0x0780(%0);	\n\t"	\
		"cache %1, 0x0800(%0); cache %1, 0x0880(%0);	\n\t"	\
		"cache %1, 0x0900(%0); cache %1, 0x0980(%0);	\n\t"	\
		"cache %1, 0x0a00(%0); cache %1, 0x0a80(%0);	\n\t"	\
		"cache %1, 0x0b00(%0); cache %1, 0x0b80(%0);	\n\t"	\
		"cache %1, 0x0c00(%0); cache %1, 0x0c80(%0);	\n\t"	\
		"cache %1, 0x0d00(%0); cache %1, 0x0d80(%0);	\n\t"	\
		"cache %1, 0x0e00(%0); cache %1, 0x0e80(%0);	\n\t"	\
		"cache %1, 0x0f00(%0); cache %1, 0x0f80(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 *	cache lines in each of 2 ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x010(%0); cache %2, 0x010(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x030(%0); cache %2, 0x030(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x050(%0); cache %2, 0x050(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x070(%0); cache %2, 0x070(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x090(%0); cache %2, 0x090(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0b0(%0); cache %2, 0x0b0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0d0(%0); cache %2, 0x0d0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x0f0(%0); cache %2, 0x0f0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)
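
/*
 * In the n-way macros each "va" argument names the same index in a
 * different way of the cache; for index operations the way addresses
 * are conventionally spaced by the per-way size.  A sketch under that
 * assumption ("waysize" is illustrative, not defined here), stepping
 * 16 lines * 16 bytes = 0x100 per iteration over way 0:
 *
 *	vm_offset_t w0 = base;
 *	vm_offset_t w1 = base + waysize;
 *
 *	while (w0 < base + waysize) {
 *		cache_r4k_op_16lines_16_2way(w0, w1,
 *		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 *		w0 += 0x100;
 *		w1 += 0x100;
 *	}
 */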

/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 *	cache lines in each of 2 ways.
 */
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x100(%0); cache %2, 0x100(%1);	\n\t"	\
		"cache %2, 0x120(%0); cache %2, 0x120(%1);	\n\t"	\
		"cache %2, 0x140(%0); cache %2, 0x140(%1);	\n\t"	\
		"cache %2, 0x160(%0); cache %2, 0x160(%1);	\n\t"	\
		"cache %2, 0x180(%0); cache %2, 0x180(%1);	\n\t"	\
		"cache %2, 0x1a0(%0); cache %2, 0x1a0(%1);	\n\t"	\
		"cache %2, 0x1c0(%0); cache %2, 0x1c0(%1);	\n\t"	\
		"cache %2, 0x1e0(%0); cache %2, 0x1e0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 *	cache lines in each of 4 ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x010(%0); cache %4, 0x010(%1);	\n\t"	\
		"cache %4, 0x010(%2); cache %4, 0x010(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x030(%0); cache %4, 0x030(%1);	\n\t"	\
		"cache %4, 0x030(%2); cache %4, 0x030(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x050(%0); cache %4, 0x050(%1);	\n\t"	\
		"cache %4, 0x050(%2); cache %4, 0x050(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x070(%0); cache %4, 0x070(%1);	\n\t"	\
		"cache %4, 0x070(%2); cache %4, 0x070(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 *	cache lines in each of 4 ways.
 */
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x080(%0); cache %4, 0x080(%1);	\n\t"	\
		"cache %4, 0x080(%2); cache %4, 0x080(%3);	\n\t"	\
		"cache %4, 0x0a0(%0); cache %4, 0x0a0(%1);	\n\t"	\
		"cache %4, 0x0a0(%2); cache %4, 0x0a0(%3);	\n\t"	\
		"cache %4, 0x0c0(%0); cache %4, 0x0c0(%1);	\n\t"	\
		"cache %4, 0x0c0(%2); cache %4, 0x0c0(%3);	\n\t"	\
		"cache %4, 0x0e0(%0); cache %4, 0x0e0(%1);	\n\t"	\
		"cache %4, 0x0e0(%2); cache %4, 0x0e0(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)
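
/*
 * The 4-way variants follow the same convention with four way
 * addresses; a sketch (again assuming an illustrative "waysize"
 * spacing between ways):
 *
 *	cache_r4k_op_8lines_32_4way(va,
 *	    va + 1 * waysize,
 *	    va + 2 * waysize,
 *	    va + 3 * waysize,
 *	    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 */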

void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(vm_paddr_t, vm_size_t);
void	r4k_icache_sync_range_index_16(vm_paddr_t, vm_size_t);

void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(vm_paddr_t, vm_size_t);
void	r4k_icache_sync_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wbinv_range_index_16(vm_paddr_t, vm_size_t);

void	r4k_pdcache_inv_range_16(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wb_range_16(vm_paddr_t, vm_size_t);

void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_pdcache_inv_range_32(vm_paddr_t, vm_size_t);
void	r4k_pdcache_wb_range_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_32(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_32(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_128(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_128(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_128(vm_paddr_t, vm_size_t);

void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wbinv_range_index_generic(vm_paddr_t, vm_size_t);

void	r4k_sdcache_inv_range_generic(vm_paddr_t, vm_size_t);
void	r4k_sdcache_wb_range_generic(vm_paddr_t, vm_size_t);
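
/*
 * The functions declared above are implemented elsewhere in terms of
 * the macros in this file; a minimal sketch of the pattern for one of
 * them, assuming a 16-byte line and an illustrative
 * "mips_picache_size" bound (neither detail comes from this header):
 *
 *	void
 *	r4k_icache_sync_all_16(void)
 *	{
 *		vm_offset_t va = MIPS_PHYS_TO_KSEG0(0);
 *		vm_offset_t eva = va + mips_picache_size;
 *
 *		while (va < eva) {
 *			cache_r4k_op_32lines_16(va,
 *			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
 *			va += (32 * 16);
 *		}
 *	}
 */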

#endif /* !LOCORE */