1/*	$NetBSD: cache.h,v 1.16 2020/07/27 10:59:10 skrll Exp $	*/
2
3/*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed for the NetBSD Project by
20 *	Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#ifndef _MIPS_CACHE_H_
39#define	_MIPS_CACHE_H_
40
41/*
42 * Cache operations.
43 *
44 * We define the following primitives:
45 *
46 * --- Instruction cache synchronization (mandatory):
47 *
48 *	icache_sync_all		Synchronize I-cache
49 *
50 *	icache_sync_range	Synchronize I-cache range
51 *
52 *	icache_sync_range_index	(index ops)
53 *
54 * --- Primary data cache (mandatory):
55 *
56 *	pdcache_wbinv_all	Write-back Invalidate primary D-cache
57 *
58 *	pdcache_wbinv_range	Write-back Invalidate primary D-cache range
59 *
60 *	pdcache_wbinv_range_index (index ops)
61 *
62 *	pdcache_inv_range	Invalidate primary D-cache range
63 *
64 *	pdcache_wb_range	Write-back primary D-cache range
65 *
66 * --- Secondary data cache (optional):
67 *
68 *	sdcache_wbinv_all	Write-back Invalidate secondary D-cache
69 *
70 *	sdcache_wbinv_range	Write-back Invalidate secondary D-cache range
71 *
72 *	sdcache_wbinv_range_index (index ops)
73 *
74 *	sdcache_inv_range	Invalidate secondary D-cache range
75 *
76 *	sdcache_wb_range	Write-back secondary D-cache range
77 *
78 * There are some rules that must be followed:
79 *
80 *	I-cache Synch (all or range):
81 *		The goal is to synchronize the instruction stream,
82 *		so you may need to write-back dirty data cache
83 *		blocks first.  If a range is requested, and you
84 *		can't synchronize just a range, you have to hit
85 *		the whole thing.
86 *
87 *	D-cache Write-back Invalidate range:
88 *		If you can't WB-Inv a range, you must WB-Inv the
89 *		entire D-cache.
90 *
91 *	D-cache Invalidate:
92 *		If you can't Inv the D-cache without doing a
93 *		Write-back, YOU MUST PANIC.  This is to catch
94 *		errors in calling code.  Callers must be aware
95 *		of this scenario, and must handle it appropriately
96 *		(consider the bus_dma(9) operations).
97 *
98 *	D-cache Write-back:
99 *		If you can't Write-back without doing an invalidate,
100 *		that's fine.  Then treat this as a WB-Inv.  Skipping
101 *		the invalidate is merely an optimization.
102 *
103 *	All operations:
104 *		Valid virtual addresses must be passed to the
105 *		cache operation.
106 *
107 * Finally, these primitives are grouped together in reasonable
108 * ways.  For all operations described here, first the primary
109 * cache is frobbed, then the secondary cache frobbed, if the
110 * operation for the secondary cache exists.
111 *
112 *	mips_icache_sync_all	Synchronize I-cache
113 *
114 *	mips_icache_sync_range	Synchronize I-cache range
115 *
116 *	mips_icache_sync_range_index (index ops)
117 *
118 *	mips_dcache_wbinv_all	Write-back Invalidate D-cache
119 *
120 *	mips_dcache_wbinv_range	Write-back Invalidate D-cache range
121 *
122 *	mips_dcache_wbinv_range_index (index ops)
123 *
124 *	mips_dcache_inv_range	Invalidate D-cache range
125 *
126 *	mips_dcache_wb_range	Write-back D-cache range
127 */
128
/*
 * Per-CPU-model cache operation vector.  Board/CPU attach code fills
 * this in with routines appropriate to the detected cache hardware;
 * the mips_*cache_* macros below dispatch through it.
 *
 * "range" ops take a starting address and a size in bytes; "range_index"
 * ops operate by cache index (virtual address used only for indexing).
 */
struct mips_cache_ops {
	/* I-cache synchronization (mandatory). */
	void	(*mco_icache_sync_all)(void);
	void	(*mco_icache_sync_range)(register_t, vsize_t);
	void	(*mco_icache_sync_range_index)(vaddr_t, vsize_t);

	/* Primary D-cache operations (mandatory). */
	void	(*mco_pdcache_wbinv_all)(void);
	void	(*mco_pdcache_wbinv_range)(register_t, vsize_t);
	void	(*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_pdcache_inv_range)(register_t, vsize_t);
	void	(*mco_pdcache_wb_range)(register_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_icache_sync_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_icache_sync_range)(register_t, vsize_t);
	void	(*mco_intern_pdcache_sync_all)(void);
	void	(*mco_intern_pdcache_sync_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_pdcache_sync_range)(register_t, vsize_t);
	/* This is used internally by the (mipsNN) pdcache functions. */
	void	(*mco_intern_pdcache_wbinv_range_index)(vaddr_t, vsize_t);

	/*
	 * Secondary D-cache operations (optional): the __mco_* dispatch
	 * macros below skip these when the pointer is NULL.
	 */
	void	(*mco_sdcache_wbinv_all)(void);
	void	(*mco_sdcache_wbinv_range)(register_t, vsize_t);
	void	(*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_sdcache_inv_range)(register_t, vsize_t);
	void	(*mco_sdcache_wb_range)(register_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_sdcache_sync_all)(void);
	void	(*mco_intern_sdcache_sync_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_sdcache_sync_range)(register_t, vsize_t);

	/* This is used internally by the (mipsNN) sdcache functions. */
	void	(*mco_intern_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
};

/* The single system-wide cache op vector (defined in the MI cache code). */
extern struct mips_cache_ops mips_cache_ops;
165
/*
 * Discovered cache geometry for up to three cache levels, plus derived
 * alignment/alias parameters.  Presumably filled in by mips_config_cache()
 * during early bootstrap — confirm against the cache configuration code.
 */
/* PRIMARY CACHE VARIABLES */
struct mips_cache_info {
	/* Primary I-cache geometry (sizes in bytes). */
	u_int mci_picache_size;
	u_int mci_picache_line_size;
	u_int mci_picache_ways;
	u_int mci_picache_way_size;
	u_int mci_picache_way_mask;
	bool mci_picache_vivt;		/* virtually indexed and tagged */

	/* Primary D-cache geometry (also used when the cache is unified). */
	u_int mci_pdcache_size;		/* and unified */
	u_int mci_pdcache_line_size;
	u_int mci_pdcache_ways;
	u_int mci_pdcache_way_size;
	u_int mci_pdcache_way_mask;
	bool mci_pdcache_write_through;

	bool mci_pcache_unified;	/* primary I and D are one cache */

	/* SECONDARY CACHE VARIABLES */
	u_int mci_sicache_size;
	u_int mci_sicache_line_size;
	u_int mci_sicache_ways;
	u_int mci_sicache_way_size;
	u_int mci_sicache_way_mask;

	u_int mci_sdcache_size;		/* and unified */
	u_int mci_sdcache_line_size;
	u_int mci_sdcache_ways;
	u_int mci_sdcache_way_size;
	u_int mci_sdcache_way_mask;
	bool mci_sdcache_write_through;

	bool mci_scache_unified;	/* secondary I and D are one cache */

	/* TERTIARY CACHE VARIABLES */
	u_int mci_tcache_size;		/* always unified */
	u_int mci_tcache_line_size;
	u_int mci_tcache_ways;
	u_int mci_tcache_way_size;
	u_int mci_tcache_way_mask;
	bool mci_tcache_write_through;

	/*
	 * These two variables inform the rest of the kernel about the
	 * size of the largest D-cache line present in the system.  The
	 * mask can be used to determine if a region of memory is cache
	 * line size aligned.
	 *
	 * Whenever any code updates a data cache line size, it should
	 * call mips_dcache_compute_align() to recompute these values.
	 */
	u_int mci_dcache_align;
	u_int mci_dcache_align_mask;

	/* Virtual-alias parameters; see mips_cache_badalias() below. */
	u_int mci_cache_prefer_mask;
	u_int mci_cache_alias_mask;
	u_int mci_icache_alias_mask;

	bool mci_cache_virtual_alias;
	bool mci_icache_virtual_alias;
};
227

/*
 * When the kernel is configured only for CPU types whose caches are
 * assumed free of virtual-address aliases (and we are not building a
 * loadable module, which must stay generic), these collapse to
 * compile-time constants so alias-handling code can be optimized away.
 * Otherwise they read the runtime-detected values in mips_cache_info.
 */
#if (MIPS1 + MIPS64_RMIXL + MIPS64R2_RMIXL + MIPS64_OCTEON) > 0 && \
    (MIPS3 + MIPS4) == 0 \
     && !defined(_MODULE)
#define	MIPS_CACHE_ALIAS_MASK		0
#define	MIPS_CACHE_VIRTUAL_ALIAS	false
#else
#define	MIPS_CACHE_ALIAS_MASK		mips_cache_info.mci_cache_alias_mask
#define	MIPS_CACHE_VIRTUAL_ALIAS	mips_cache_info.mci_cache_virtual_alias
#endif
/* Same idea for the I-cache (note: MIPS64R2_RMIXL is not in this set). */
#if (MIPS1 + MIPS64_RMIXL + MIPS64_OCTEON) > 0 && \
    (MIPS3 + MIPS4) == 0 \
    && !defined(_MODULE)
#define	MIPS_ICACHE_ALIAS_MASK		0
#define	MIPS_ICACHE_VIRTUAL_ALIAS	false
#else
#define	MIPS_ICACHE_ALIAS_MASK		mips_cache_info.mci_icache_alias_mask
#define	MIPS_ICACHE_VIRTUAL_ALIAS	mips_cache_info.mci_icache_virtual_alias
#endif

/* The single system-wide cache description (defined in the MI cache code). */
extern struct mips_cache_info mips_cache_info;
249
250
251/*
252 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
253 */
254#define	mips_cache_indexof(x)	(((vaddr_t)(x)) & MIPS_CACHE_ALIAS_MASK)
255#define	mips_cache_badalias(x,y) (((vaddr_t)(x)^(vaddr_t)(y)) & MIPS_CACHE_ALIAS_MASK)
256
/*
 * Dispatch helpers: paste "mco_<prefix>p<x>" / "mco_<prefix>s<x>" to call
 * first the primary-cache op (always present), then the corresponding
 * secondary-cache op if its function pointer is non-NULL.
 */
#define	__mco_noargs(prefix, x)						\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )();			\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )();		\
} while (/*CONSTCOND*/0)

#define	__mco_2args(prefix, x, a, b)					\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b));		\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b));	\
} while (/*CONSTCOND*/0)
270
/*
 * Public cache primitives (see the big comment above for semantics).
 * The I-cache ops go straight through the op vector; the D-cache ops
 * hit the primary cache and then the secondary cache, if present.
 */
#define	mips_icache_sync_all()						\
	(*mips_cache_ops.mco_icache_sync_all)()

#define	mips_icache_sync_range(v, s)					\
	(*mips_cache_ops.mco_icache_sync_range)((v), (s))

#define	mips_icache_sync_range_index(v, s)				\
	(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))

#define	mips_dcache_wbinv_all()						\
	__mco_noargs(, dcache_wbinv_all)

#define	mips_dcache_wbinv_range(v, s)					\
	__mco_2args(, dcache_wbinv_range, (v), (s))

#define	mips_dcache_wbinv_range_index(v, s)				\
	__mco_2args(, dcache_wbinv_range_index, (v), (s))

#define	mips_dcache_inv_range(v, s)					\
	__mco_2args(, dcache_inv_range, (v), (s))

#define	mips_dcache_wb_range(v, s)					\
	__mco_2args(, dcache_wb_range, (v), (s))
294
295
296/*
297 * Private D-cache functions only called from (currently only the
298 * mipsNN) I-cache functions.
299 */
300#define	mips_intern_dcache_sync_all()					\
301	__mco_noargs(intern_, dcache_sync_all)
302
303#define	mips_intern_dcache_sync_range_index(v, s)			\
304	__mco_2args(intern_, dcache_sync_range_index, (v), (s))
305
306#define	mips_intern_dcache_sync_range(v, s)				\
307	__mco_2args(intern_, dcache_sync_range, (v), (s))
308
309#define	mips_intern_pdcache_wbinv_range_index(v, s)			\
310	(*mips_cache_ops.mco_intern_pdcache_wbinv_range_index)((v), (s))
311
312#define	mips_intern_sdcache_wbinv_range_index(v, s)			\
313	(*mips_cache_ops.mco_intern_sdcache_wbinv_range_index)((v), (s))
314
315#define	mips_intern_icache_sync_range(v, s)				\
316	(*mips_cache_ops.mco_intern_icache_sync_range)((v), (s))
317
318#define	mips_intern_icache_sync_range_index(v, s)			\
319	(*mips_cache_ops.mco_intern_icache_sync_range_index)((v), (s))
320
/* Probe the caches and initialize mips_cache_ops / mips_cache_info. */
void	mips_config_cache(void);
/* Recompute mci_dcache_align{,_mask} after a D-cache line size changes. */
void	mips_dcache_compute_align(void);

#include <mips/cache_mipsNN.h>

#endif /* _MIPS_CACHE_H_ */
327