/*	$OpenBSD: cpu.h,v 1.133 2021/05/28 16:33:36 visa Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 *	from: @(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _MIPS64_CPU_H_
#define	_MIPS64_CPU_H_

#ifndef _LOCORE

/*
 * MIPS32-style segment definitions.
 * They only cover the first 512MB of physical addresses.
 */
#define	CKSEG0_BASE		0xffffffff80000000UL
#define	CKSEG1_BASE		0xffffffffa0000000UL
#define	CKSSEG_BASE		0xffffffffc0000000UL
#define	CKSEG3_BASE		0xffffffffe0000000UL
#define	CKSEG_SIZE		0x0000000020000000UL

#define	CKSEG0_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	CKSEG1_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	PHYS_TO_CKSEG0(x)	((u_long)(x) | CKSEG0_BASE)
#define	PHYS_TO_CKSEG1(x)	((u_long)(x) | CKSEG1_BASE)
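
/*
 * Example (illustrative): for a device ROM at physical 0x1fc00000,
 *	PHYS_TO_CKSEG0(0x1fc00000) == 0xffffffff9fc00000	(cached)
 *	PHYS_TO_CKSEG1(0x1fc00000) == 0xffffffffbfc00000	(uncached)
 * and CKSEG0_TO_PHYS()/CKSEG1_TO_PHYS() mask the low 29 bits back out.
 */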

/*
 * MIPS64-style segment definitions.
 * These allow for a 36-bit physical address space, thus 64GB.
 */

/*
 * Cache Coherency Attributes.
 */
/* r8k only */
#define	CCA_NC_COPROCESSOR	0UL	/* uncached, coprocessor ordered */
/* common to r4, r5k, r8k and r1xk */
#define	CCA_NC			2UL	/* uncached, write-around */
#define	CCA_NONCOHERENT		3UL	/* cached, non-coherent, write-back */
/* r8k, r1xk only */
#define	CCA_COHERENT_EXCL	4UL	/* cached, coherent, exclusive */
#define	CCA_COHERENT_EXCLWRITE	5UL	/* cached, coherent, exclusive write */
/* r4k only */
#define	CCA_COHERENT_UPDWRITE	6UL	/* cached, coherent, update on write */
/* r1xk only */
#define	CCA_NC_ACCELERATED	7UL	/* uncached accelerated */

#ifdef TGT_COHERENT
#define	CCA_CACHED		CCA_COHERENT_EXCLWRITE
#else
#define	CCA_CACHED		CCA_NONCOHERENT
#endif

#define	XKSSSEG_BASE		0x4000000000000000UL
#define	XKPHYS_BASE		0x8000000000000000UL
#define	XKSSEG_BASE		0xc000000000000000UL

#define	XKPHYS_TO_PHYS(x)	((paddr_t)(x) & 0x0000000fffffffffUL)
#define	PHYS_TO_XKPHYS(x,c)	((paddr_t)(x) | XKPHYS_BASE | ((c) << 59))
#define	IS_XKPHYS(va)		(((va) >> 62) == 2)
#define	XKPHYS_TO_CCA(x)	(((x) >> 59) & 0x07)
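
/*
 * Example (illustrative): an uncached, direct-mapped view of physical
 * address 0x20000000 is
 *	PHYS_TO_XKPHYS(0x20000000, CCA_NC) == 0x9000000020000000UL
 * IS_XKPHYS() recognizes such an address by its top two bits (10),
 * and XKPHYS_TO_CCA() recovers the CCA field from bits 61:59.
 */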

#endif	/* _LOCORE */

/*
 * Exported definitions unique to mips cpu support.
 */

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>
#include <sys/srp.h>

struct cpu_hwinfo {
	uint32_t	c0prid;
	uint32_t	c1prid;
	uint32_t	clock;	/* Hz */
	uint32_t	tlbsize;
	uint		type;
	uint32_t	l2size;
};

/*
 * Cache memory configuration. One struct per cache.
 */
struct cache_info {
	uint		size;		/* total cache size */
	uint		linesize;	/* line size */
	uint		setsize;	/* set size */
	uint		sets;		/* number of sets */
};

struct cpu_info {
	struct device	*ci_dev;	/* our device */
	struct cpu_info	*ci_self;	/* pointer to this structure */
	struct cpu_info	*ci_next;	/* next cpu */
	struct proc	*ci_curproc;
	struct user	*ci_curprocpaddr;
	struct proc	*ci_fpuproc;	/* pointer to last proc to use FP */
	uint32_t	 ci_delayconst;
	struct cpu_hwinfo
			ci_hw;

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	/* cache information and pending flush state */
	uint		ci_cacheconfiguration;
	uint64_t	ci_cachepending_l1i;
	struct cache_info
			ci_l1inst,
			ci_l1data,
			ci_l2,
			ci_l3;

	/* function pointers for the cache handling routines */
	void		(*ci_SyncCache)(struct cpu_info *);
	void		(*ci_InvalidateICache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_InvalidateICachePage)(struct cpu_info *, vaddr_t);
	void		(*ci_SyncICache)(struct cpu_info *);
	void		(*ci_SyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
	void		(*ci_HitInvalidateDCache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t,
			    int);

	struct schedstate_percpu
			ci_schedstate;
	int		ci_want_resched;	/* need_resched() invoked */
	cpuid_t		ci_cpuid;		/* our CPU ID */
	uint32_t	ci_randseed;		/* per cpu random seed */
	volatile int	ci_ipl;			/* software IPL */
	uint32_t	ci_softpending;		/* pending soft interrupts */
	int		ci_clock_started;
	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */

	u_int32_t	ci_pendingticks;

	struct pmap	*ci_curpmap;
	uint		ci_intrdepth;		/* interrupt depth */
#ifdef MULTIPROCESSOR
	u_long		ci_flags;		/* flags; see below */
#endif
	volatile int    ci_ddb;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

#define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
#define	CPUF_PRESENT	0x02		/* CPU is present */
#define	CPUF_RUNNING	0x04		/* CPU is running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;
#define CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)

#define CPU_INFO_UNIT(ci)               ((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)
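
/*
 * Sketch (illustrative): iterating over all attached CPUs with the
 * macros above:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%d\n", CPU_INFO_UNIT(ci));
 */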

extern void (*cpu_idle_cycle_func)(void);
#define cpu_idle_cycle()		(*cpu_idle_cycle_func)()

#ifdef MULTIPROCESSOR
#define getcurcpu()			hw_getcurcpu()
#define setcurcpu(ci)			hw_setcurcpu(ci)
extern struct cpu_info *get_cpu_info(int);
#define curcpu() getcurcpu()
#define	CPU_IS_PRIMARY(ci)		((ci)->ci_flags & CPUF_PRIMARY)
#define cpu_number()			(curcpu()->ci_cpuid)

extern struct cpuset cpus_running;
void cpu_unidle(struct cpu_info *);
void cpu_boot_secondary_processors(void);
#define cpu_boot_secondary(ci)          hw_cpu_boot_secondary(ci)
#define cpu_hatch(ci)                   hw_cpu_hatch(ci)

vaddr_t alloc_contiguous_pages(size_t);

#define MIPS64_IPI_NOP		0x00000001
#define MIPS64_IPI_RENDEZVOUS	0x00000002
#define MIPS64_IPI_DDB		0x00000004
#define MIPS64_NIPIS		3	/* must not exceed 32 */

void	mips64_ipi_init(void);
void	mips64_send_ipi(unsigned int, unsigned int);
void	smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);
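
/*
 * Sketch (illustrative; the first argument is assumed to be the target
 * cpuid and the second the IPI bits): asking another CPU to stop in
 * ddb could look like
 *
 *	mips64_send_ipi(ci->ci_cpuid, MIPS64_IPI_DDB);
 */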

#include <sys/mplock.h>
#else
#define MAXCPUS				1
#define curcpu()			(&cpu_info_primary)
#define	CPU_IS_PRIMARY(ci)		1
#define cpu_number()			0UL
#define cpu_unidle(ci)
#define get_cpu_info(i)			(&cpu_info_primary)
#endif

#define CPU_BUSY_CYCLE()	do {} while (0)

extern void (*md_startclock)(struct cpu_info *);
void	cp0_calibrate(struct cpu_info *);

unsigned int cpu_rnd_messybits(void);

#include <machine/frame.h>

/*
 * Arguments to hardclock encapsulate the previous machine state in
 * an opaque clockframe.
 */
#define	clockframe trapframe	/* Use normal trap frame */

#define	SR_KSU_USER		0x00000010
#define	CLKF_USERMODE(framep)	((framep)->sr & SR_KSU_USER)
#define	CLKF_PC(framep)		((framep)->pc)
#define	CLKF_INTR(framep)	(curcpu()->ci_intrdepth > 1)	/* XXX */
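
/*
 * Sketch (illustrative): a clock interrupt handler passes its trap
 * frame to hardclock() as a clockframe, which can then test e.g.
 *
 *	if (CLKF_USERMODE(frame))
 *		... the tick interrupted user mode at CLKF_PC(frame) ...
 */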

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->pc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->sp)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void	need_resched(struct cpu_info *);
#define	clear_resched(ci) 	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On MIPS designs, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending,
 * to be processed as soon as possible.
 */
void	signotify(struct proc *);

#define	aston(p)		((p)->p_md.md_astpending = 1)

#define	mips_sync()		__asm__ volatile ("sync" ::: "memory")

#endif /* _KERNEL && !_LOCORE */

#ifdef _KERNEL
/*
 * Values for the code field in a break instruction.
 */
#define	BREAK_INSTR		0x0000000d
#define	BREAK_VAL_MASK		0x03ff0000
#define	BREAK_VAL_SHIFT		16
#define	BREAK_KDB_VAL		512
#define	BREAK_SSTEP_VAL		513
#define	BREAK_BRKPT_VAL		514
#define	BREAK_SOVER_VAL		515
#define	BREAK_DDB_VAL		516
#define	BREAK_FPUEMUL_VAL	517
#define	BREAK_KDB	(BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SSTEP	(BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
#define	BREAK_BRKPT	(BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SOVER	(BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
#define	BREAK_DDB	(BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_FPUEMUL	(BREAK_INSTR | (BREAK_FPUEMUL_VAL << BREAK_VAL_SHIFT))
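
/*
 * Example (illustrative): the code value lives in bits 25:16 of the
 * break instruction, so
 *	BREAK_SSTEP == 0x0000000d | (513 << 16) == 0x0201000d
 * and can be recovered from a faulting instruction with
 *	(insn & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT
 */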

#endif /* _KERNEL */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_ALLOWAPERTURE	1	/* allow mmap of /dev/xf86 */
		/*		2	   formerly: keyboard reset */
		/*		3	   formerly: CPU_LIDSUSPEND */
#define CPU_LIDACTION		4	/* action caused by lid close */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "allowaperture", CTLTYPE_INT },	\
	{ 0, 0 },				\
	{ 0, 0 },				\
	{ "lidaction", CTLTYPE_INT },		\
}
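
/*
 * Example (illustrative, userland): reading machdep.allowaperture
 * through sysctl(2):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_ALLOWAPERTURE }, val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctl(mib, 2, &val, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */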

/*
 * MIPS CPU types (cp_imp).
 */
#define	MIPS_R2000	0x01	/* MIPS R2000 CPU		ISA I   */
#define	MIPS_R3000	0x02	/* MIPS R3000 CPU		ISA I   */
#define	MIPS_R6000	0x03	/* MIPS R6000 CPU		ISA II	*/
#define	MIPS_R4000	0x04	/* MIPS R4000/4400 CPU		ISA III	*/
#define	MIPS_R3LSI	0x05	/* LSI Logic R3000 derivate	ISA I	*/
#define	MIPS_R6000A	0x06	/* MIPS R6000A CPU		ISA II	*/
#define	MIPS_CN50XX	0x06	/* Cavium OCTEON CN50xx		MIPS64R2*/
#define	MIPS_R3IDT	0x07	/* IDT R3000 derivate		ISA I	*/
#define	MIPS_R10000	0x09	/* MIPS R10000/T5 CPU		ISA IV  */
#define	MIPS_R4200	0x0a	/* MIPS R4200 CPU (ICE)		ISA III */
#define	MIPS_R4300	0x0b	/* NEC VR4300 CPU		ISA III */
#define	MIPS_R4100	0x0c	/* NEC VR41xx CPU MIPS-16	ISA III */
#define	MIPS_R12000	0x0e	/* MIPS R12000			ISA IV  */
#define	MIPS_R14000	0x0f	/* MIPS R14000			ISA IV  */
#define	MIPS_R8000	0x10	/* MIPS R8000 Blackbird/TFP	ISA IV  */
#define	MIPS_R4600	0x20	/* PMCS R4600 Orion		ISA III */
#define	MIPS_R4700	0x21	/* PMCS R4700 Orion		ISA III */
#define	MIPS_R3TOSH	0x22	/* Toshiba R3000 based CPU	ISA I	*/
#define	MIPS_R5000	0x23	/* MIPS R5000 CPU		ISA IV  */
#define	MIPS_RM7000	0x27	/* PMCS RM7000 CPU		ISA IV  */
#define	MIPS_RM52X0	0x28	/* PMCS RM52X0 CPU		ISA IV  */
#define	MIPS_RM9000	0x34	/* PMCS RM9000 CPU		ISA IV  */
#define	MIPS_LOONGSON	0x42	/* STC LoongSon CPU		ISA III */
#define	MIPS_VR5400	0x54	/* NEC Vr5400 CPU		ISA IV+ */
#define	MIPS_LOONGSON2	0x63	/* STC LoongSon2/3 CPU		ISA III+ */
#define	MIPS_CN63XX	0x90	/* Cavium OCTEON II CN6[23]xx	MIPS64R2 */
#define	MIPS_CN68XX	0x91	/* Cavium OCTEON II CN68xx	MIPS64R2 */
#define	MIPS_CN66XX	0x92	/* Cavium OCTEON II CN66xx	MIPS64R2 */
#define	MIPS_CN61XX	0x93	/* Cavium OCTEON II CN6[01]xx	MIPS64R2 */
#define	MIPS_CN78XX	0x95	/* Cavium OCTEON III CN7[678]xx	MIPS64R2 */
#define	MIPS_CN71XX	0x96	/* Cavium OCTEON III CN7[01]xx	MIPS64R2 */
#define	MIPS_CN73XX	0x97	/* Cavium OCTEON III CN7[23]xx	MIPS64R2 */
/*
 * MIPS FPU types. Only the soft type is listed here; the others match
 * the CPU types above.
 */
#define	MIPS_SOFT	0x00	/* Software emulation		ISA I   */


#if defined(_KERNEL) && !defined(_LOCORE)

extern register_t protosr;
extern int cpu_has_synced_cp0_count;
extern int cpu_has_userlocal;

#ifdef FPUEMUL
#define	CPU_HAS_FPU(ci)	((ci)->ci_hw.c1prid != 0)
#else
#define	CPU_HAS_FPU(ci)	1
#endif

struct exec_package;
struct user;

void	tlb_asid_wrap(struct cpu_info *);
void	tlb_flush(int);
void	tlb_flush_addr(vaddr_t);
void	tlb_init(unsigned int);
int64_t	tlb_probe(vaddr_t);
void	tlb_set_page_mask(uint32_t);
void	tlb_set_pid(u_int);
void	tlb_set_wired(uint32_t);
int	tlb_update(vaddr_t, register_t);
void	tlb_update_indexed(vaddr_t, register_t, register_t, uint);

void	build_trampoline(vaddr_t, vaddr_t);
void	cpu_switchto_asm(struct proc *, struct proc *);
int	exec_md_map(struct proc *, struct exec_package *);
void	savectx(struct user *, int);

void	enable_fpu(struct proc *);
void	save_fpu(void);
int	fpe_branch_emulate(struct proc *, struct trapframe *, uint32_t,
	    vaddr_t);
void	MipsSaveCurFPState(struct proc *);
void	MipsSaveCurFPState16(struct proc *);
void	MipsSwitchFPState(struct proc *, struct trapframe *);
void	MipsSwitchFPState16(struct proc *, struct trapframe *);

int	guarded_read_1(paddr_t, uint8_t *);
int	guarded_read_2(paddr_t, uint16_t *);
int	guarded_read_4(paddr_t, uint32_t *);
int	guarded_write_4(paddr_t, uint32_t);

void	MipsFPTrap(struct trapframe *);
register_t MipsEmulateBranch(struct trapframe *, vaddr_t, uint32_t, uint32_t);

int	classify_insn(uint32_t);
#define	INSNCLASS_NEUTRAL	0
#define	INSNCLASS_CALL		1
#define	INSNCLASS_BRANCH	2

/*
 * Low level access routines to CPU registers
 */

void	setsoftintr0(void);
void	clearsoftintr0(void);
void	setsoftintr1(void);
void	clearsoftintr1(void);
register_t enableintr(void);
register_t disableintr(void);
register_t getsr(void);
register_t setsr(register_t);

u_int	cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
uint32_t cp0_get_config_2(void);
uint32_t cp0_get_config_3(void);
uint32_t cp0_get_config_4(void);
uint32_t cp0_get_pagegrain(void);
register_t cp0_get_prid(void);
void	cp0_reset_cause(register_t);
void	cp0_set_compare(u_int);
void	cp0_set_config(register_t);
void	cp0_set_pagegrain(uint32_t);
void	cp0_set_trapbase(register_t);
u_int	cp1_get_prid(void);

static inline uint32_t
cp0_get_hwrena(void)
{
	uint32_t value;
	__asm__ volatile ("mfc0 %0, $7" : "=r" (value));
	return value;
}

static inline void
cp0_set_hwrena(uint32_t value)
{
	__asm__ volatile ("mtc0 %0, $7" : : "r" (value));
}

static inline void
cp0_set_userlocal(void *value)
{
	__asm__ volatile (
	"	.set	push\n"
	"	.set	mips64r2\n"
	"	dmtc0	%0, $4, 2\n"
	"	.set	pop\n"
	: : "r" (value));
}
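
/*
 * Sketch (illustrative; md_tcb is an assumed field name): on CPUs with
 * cpu_has_userlocal set, the kernel can expose a per-thread pointer to
 * userland through the UserLocal register, roughly:
 *
 *	cp0_set_hwrena(cp0_get_hwrena() | (1U << 29));	(HWREna UL bit)
 *	cp0_set_userlocal(p->p_md.md_tcb);
 *
 * after which userland reads it back with "rdhwr $3, $29".
 */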

static inline u_long
intr_disable(void)
{
	return disableintr();
}

static inline void
intr_restore(u_long sr)
{
	setsr(sr);
}
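
/*
 * Typical critical-section use (sketch):
 *
 *	u_long sr;
 *
 *	sr = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(sr);
 */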

/*
 * Cache routines (may be overridden)
 */

#ifndef	Mips_SyncCache
#define	Mips_SyncCache(ci) \
	((ci)->ci_SyncCache)(ci)
#endif
#ifndef	Mips_InvalidateICache
#define	Mips_InvalidateICache(ci, va, l) \
	((ci)->ci_InvalidateICache)(ci, va, l)
#endif
#ifndef	Mips_InvalidateICachePage
#define	Mips_InvalidateICachePage(ci, va) \
	((ci)->ci_InvalidateICachePage)(ci, va)
#endif
#ifndef	Mips_SyncICache
#define	Mips_SyncICache(ci) \
	((ci)->ci_SyncICache)(ci)
#endif
#ifndef	Mips_SyncDCachePage
#define	Mips_SyncDCachePage(ci, va, pa) \
	((ci)->ci_SyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCachePage
#define	Mips_HitSyncDCachePage(ci, va, pa) \
	((ci)->ci_HitSyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCache
#define	Mips_HitSyncDCache(ci, va, l) \
	((ci)->ci_HitSyncDCache)(ci, va, l)
#endif
#ifndef	Mips_HitInvalidateDCache
#define	Mips_HitInvalidateDCache(ci, va, l) \
	((ci)->ci_HitInvalidateDCache)(ci, va, l)
#endif
#ifndef	Mips_IOSyncDCache
#define	Mips_IOSyncDCache(ci, va, l, h) \
	((ci)->ci_IOSyncDCache)(ci, va, l, h)
#endif
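
/*
 * Example (illustrative): code that writes instructions into memory
 * (e.g. a breakpoint or a trampoline) typically writes back the
 * D-cache and invalidates the I-cache before executing them:
 *
 *	Mips_HitSyncDCache(curcpu(), va, len);
 *	Mips_InvalidateICache(curcpu(), va, len);
 *
 * The routines these resolve to are selected per CPU model at cache
 * configuration time.
 */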

#endif /* _KERNEL && !_LOCORE */
#endif /* !_MIPS64_CPU_H_ */
