/*	$OpenBSD: cpu.h,v 1.122 2017/10/21 06:11:22 visa Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 *	from: @(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _MIPS64_CPU_H_
#define	_MIPS64_CPU_H_

#ifndef _LOCORE

/*
 * MIPS32-style segment definitions.
 * They only cover the first 512MB of physical addresses.
 */
#define	CKSEG0_BASE		0xffffffff80000000UL
#define	CKSEG1_BASE		0xffffffffa0000000UL
#define	CKSSEG_BASE		0xffffffffc0000000UL
#define	CKSEG3_BASE		0xffffffffe0000000UL
#define	CKSEG_SIZE		0x0000000020000000UL

#define	CKSEG0_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	CKSEG1_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	PHYS_TO_CKSEG0(x)	((u_long)(x) | CKSEG0_BASE)
#define	PHYS_TO_CKSEG1(x)	((u_long)(x) | CKSEG1_BASE)

/*
 * MIPS64-style segment definitions.
 * These allow for 36 bits of addressable physical memory, thus 64GB.
 */

/*
 * Cache Coherency Attributes.
 */
/* r8k only */
#define	CCA_NC_COPROCESSOR	0UL	/* uncached, coprocessor ordered */
/* common to r4, r5k, r8k and r1xk */
#define	CCA_NC			2UL	/* uncached, write-around */
#define	CCA_NONCOHERENT		3UL	/* cached, non-coherent, write-back */
/* r8k, r1xk only */
#define	CCA_COHERENT_EXCL	4UL	/* cached, coherent, exclusive */
#define	CCA_COHERENT_EXCLWRITE	5UL	/* cached, coherent, exclusive write */
/* r4k only */
#define	CCA_COHERENT_UPDWRITE	6UL	/* cached, coherent, update on write */
/* r1xk only */
#define	CCA_NC_ACCELERATED	7UL	/* uncached accelerated */

#ifdef TGT_COHERENT
#define	CCA_CACHED	CCA_COHERENT_EXCLWRITE
#else
#define	CCA_CACHED	CCA_NONCOHERENT
#endif

/*
 * Uncached spaces.
 * R1x000 processors use bits 58:57 of uncached virtual addresses (CCA_NC)
 * to select different spaces. Unfortunately, other processors need these
 * bits to be zero, so uncached address have to be decided at runtime.
 */
#define	SP_HUB		0UL	/* Hub space */
#define	SP_IO		1UL	/* I/O space */
#define	SP_SPECIAL	2UL	/* Memory Special space */
#define	SP_NC		3UL	/* Memory Uncached space */

#define	XKSSSEG_BASE		0x4000000000000000UL
#define	XKPHYS_BASE		0x8000000000000000UL
#define	XKSSEG_BASE		0xc000000000000000UL

#define	XKPHYS_TO_PHYS(x)	((paddr_t)(x) & 0x0000000fffffffffUL)
#define	PHYS_TO_XKPHYS(x,c)	((paddr_t)(x) | XKPHYS_BASE | ((c) << 59))
#define	PHYS_TO_XKPHYS_UNCACHED(x,s) \
	(PHYS_TO_XKPHYS(x, CCA_NC) | ((s) << 57))
#define	IS_XKPHYS(va)		(((va) >> 62) == 2)
#define	XKPHYS_TO_CCA(x)	(((x) >> 59) & 0x07)
#define	XKPHYS_TO_SP(x)		(((x) >> 57) & 0x03)

#endif	/* _LOCORE */

/*
 * Exported definitions unique to mips cpu support.
 */

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>

/* Probed processor characteristics, one per cpu. */
struct cpu_hwinfo {
	uint32_t	c0prid;		/* coprocessor 0 PRId */
	uint32_t	c1prid;		/* coprocessor 1 (FPU) PRId */
	uint32_t	clock;		/* Hz */
	uint32_t	tlbsize;	/* number of TLB entries */
	uint		type;		/* MIPS_* processor type */
	uint32_t	l2size;		/* L2 cache size in bytes */
};

/*
 * Cache memory configuration. One struct per cache.
 */
struct cache_info {
	uint		size;		/* total cache size */
	uint		linesize;	/* line size */
	uint		setsize;	/* set size */
	uint		sets;		/* number of sets */
};

struct cpu_info {
	struct device	*ci_dev;	/* our device */
	struct cpu_info	*ci_self;	/* pointer to this structure */
	struct cpu_info	*ci_next;	/* next cpu */
	struct proc	*ci_curproc;
	struct user	*ci_curprocpaddr;
	struct proc	*ci_fpuproc;	/* pointer to last proc to use FP */
	uint32_t	 ci_delayconst;
	struct cpu_hwinfo
			ci_hw;

#if defined(MULTIPROCESSOR)
	struct srp_hazard	ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	/* cache information and pending flush state */
	uint		ci_cacheconfiguration;
	uint64_t	ci_cachepending_l1i;
	struct cache_info
			ci_l1inst,
			ci_l1data,
			ci_l2,
			ci_l3;

	/* function pointers for the cache handling routines */
	void		(*ci_SyncCache)(struct cpu_info *);
	void		(*ci_InvalidateICache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_InvalidateICachePage)(struct cpu_info *, vaddr_t);
	void		(*ci_SyncICache)(struct cpu_info *);
	void		(*ci_SyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
	void		(*ci_HitInvalidateDCache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t,
			    int);

	struct schedstate_percpu
			ci_schedstate;
	int		ci_want_resched;	/* need_resched() invoked */
	cpuid_t		ci_cpuid;		/* our CPU ID */
	uint32_t	ci_randseed;		/* per cpu random seed */
	int		ci_ipl;			/* software IPL */
	uint32_t	ci_softpending;		/* pending soft interrupts */
	int		ci_clock_started;
	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */

	u_int32_t	ci_pendingticks;

#ifdef TGT_ORIGIN
	u_int16_t	ci_nasid;
	u_int16_t	ci_slice;
#endif

	struct pmap	*ci_curpmap;
	uint		ci_intrdepth;		/* interrupt depth */
#ifdef MULTIPROCESSOR
	u_long		ci_flags;		/* flags; see below */
	struct intrhand	ci_ipiih;
#endif
	volatile int	ci_ddb;			/* ddb rendezvous state */
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

#ifdef DIAGNOSTIC
	int		ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

#define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
#define	CPUF_PRESENT	0x02		/* CPU is present */
#define	CPUF_RUNNING	0x04		/* CPU is running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;
#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)

#define	CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

extern void (*cpu_idle_cycle_func)(void);
#define	cpu_idle_cycle()	(*cpu_idle_cycle_func)()

#ifdef MULTIPROCESSOR
#define	getcurcpu()	hw_getcurcpu()
#define	setcurcpu(ci)	hw_setcurcpu(ci)
extern struct cpu_info *get_cpu_info(int);
#define	curcpu() getcurcpu()
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#define	cpu_number()	(curcpu()->ci_cpuid)

extern struct cpuset cpus_running;
void	cpu_unidle(struct cpu_info *);
void	cpu_boot_secondary_processors(void);
#define	cpu_boot_secondary(ci)	hw_cpu_boot_secondary(ci)
#define	cpu_hatch(ci)		hw_cpu_hatch(ci)

vaddr_t	alloc_contiguous_pages(size_t);

#define	MIPS64_IPI_NOP		0x00000001
#define	MIPS64_IPI_RENDEZVOUS	0x00000002
#define	MIPS64_IPI_DDB		0x00000004
#define	MIPS64_NIPIS		3	/* must not exceed 32 */

void	mips64_ipi_init(void);
void	mips64_send_ipi(unsigned int, unsigned int);
void	smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);

#include <sys/mplock.h>
#else
#define	MAXCPUS	1
#define	curcpu()	(&cpu_info_primary)
#define	CPU_IS_PRIMARY(ci)	1
#define	cpu_number()	0
#define	cpu_unidle(ci)
#define	get_cpu_info(i)	(&cpu_info_primary)
#endif

#define	CPU_BUSY_CYCLE()	do {} while (0)

extern void (*md_startclock)(struct cpu_info *);
void	cp0_calibrate(struct cpu_info *);

#include <machine/frame.h>

/*
 * Arguments to hardclock encapsulate the previous machine state in
 * an opaque clockframe.
 */
#define	clockframe trapframe	/* Use normal trap frame */

#define	SR_KSU_USER		0x00000010
#define	CLKF_USERMODE(framep)	((framep)->sr & SR_KSU_USER)
#define	CLKF_PC(framep)		((framep)->pc)
#define	CLKF_INTR(framep)	(curcpu()->ci_intrdepth > 1)	/* XXX */

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->pc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->sp)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci) \
	do { \
		(ci)->ci_want_resched = 1; \
		if ((ci)->ci_curproc != NULL) \
			aston((ci)->ci_curproc); \
	} while(0)
#define	clear_resched(ci)	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On MIPS designs, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#ifdef MULTIPROCESSOR
#define	signotify(p)	(aston(p), cpu_unidle((p)->p_cpu))
#else
#define	signotify(p)	aston(p)
#endif

#define	aston(p)	((p)->p_md.md_astpending = 1)

#ifdef CPU_R8000
/* Uncached load forces ordering on R8000, which lacks a sync instruction. */
#define	mips_sync()	__asm__ volatile ("lw $0, 0(%0)" :: \
			    "r" (PHYS_TO_XKPHYS(0, CCA_NC)) : "memory")
#else
#define	mips_sync()	__asm__ volatile ("sync" ::: "memory")
#endif

#endif	/* _KERNEL && !_LOCORE */

#ifdef _KERNEL
/*
 * Values for the code field in a break instruction.
 */
#define	BREAK_INSTR		0x0000000d
#define	BREAK_VAL_MASK		0x03ff0000
#define	BREAK_VAL_SHIFT		16
#define	BREAK_KDB_VAL		512
#define	BREAK_SSTEP_VAL		513
#define	BREAK_BRKPT_VAL		514
#define	BREAK_SOVER_VAL		515
#define	BREAK_DDB_VAL		516
#define	BREAK_FPUEMUL_VAL	517
#define	BREAK_KDB	(BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SSTEP	(BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
#define	BREAK_BRKPT	(BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SOVER	(BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
#define	BREAK_DDB	(BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_FPUEMUL	(BREAK_INSTR | (BREAK_FPUEMUL_VAL << BREAK_VAL_SHIFT))

#endif /* _KERNEL */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_ALLOWAPERTURE	1	/* allow mmap of /dev/xf86 */
		/* 2 formerly: keyboard reset */
		/* 3 formerly: CPU_LIDSUSPEND */
#define	CPU_LIDACTION		4	/* action caused by lid close */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "allowaperture", CTLTYPE_INT }, \
	{ 0, 0 }, \
	{ 0, 0 }, \
	{ "lidaction", CTLTYPE_INT }, \
}

/*
 * MIPS CPU types (cp_imp).
 */
#define	MIPS_R2000	0x01	/* MIPS R2000 CPU		ISA I	*/
#define	MIPS_R3000	0x02	/* MIPS R3000 CPU		ISA I	*/
#define	MIPS_R6000	0x03	/* MIPS R6000 CPU		ISA II	*/
#define	MIPS_R4000	0x04	/* MIPS R4000/4400 CPU		ISA III	*/
#define	MIPS_R3LSI	0x05	/* LSI Logic R3000 derivate	ISA I	*/
#define	MIPS_R6000A	0x06	/* MIPS R6000A CPU		ISA II	*/
#define	MIPS_CN50XX	0x06	/* Cavium OCTEON CN50xx		MIPS64R2*/
#define	MIPS_R3IDT	0x07	/* IDT R3000 derivate		ISA I	*/
#define	MIPS_R10000	0x09	/* MIPS R10000/T5 CPU		ISA IV	*/
#define	MIPS_R4200	0x0a	/* MIPS R4200 CPU (ICE)		ISA III	*/
#define	MIPS_R4300	0x0b	/* NEC VR4300 CPU		ISA III	*/
#define	MIPS_R4100	0x0c	/* NEC VR41xx CPU MIPS-16	ISA III	*/
#define	MIPS_R12000	0x0e	/* MIPS R12000			ISA IV	*/
#define	MIPS_R14000	0x0f	/* MIPS R14000			ISA IV	*/
#define	MIPS_R8000	0x10	/* MIPS R8000 Blackbird/TFP	ISA IV	*/
#define	MIPS_R4600	0x20	/* PMCS R4600 Orion		ISA III	*/
#define	MIPS_R4700	0x21	/* PMCS R4700 Orion		ISA III	*/
#define	MIPS_R3TOSH	0x22	/* Toshiba R3000 based CPU	ISA I	*/
#define	MIPS_R5000	0x23	/* MIPS R5000 CPU		ISA IV	*/
#define	MIPS_RM7000	0x27	/* PMCS RM7000 CPU		ISA IV	*/
#define	MIPS_RM52X0	0x28	/* PMCS RM52X0 CPU		ISA IV	*/
#define	MIPS_RM9000	0x34	/* PMCS RM9000 CPU		ISA IV	*/
#define	MIPS_LOONGSON	0x42	/* STC LoongSon CPU		ISA III	*/
#define	MIPS_VR5400	0x54	/* NEC Vr5400 CPU		ISA IV+	*/
#define	MIPS_LOONGSON2	0x63	/* STC LoongSon2/3 CPU		ISA III+ */
#define	MIPS_CN61XX	0x93	/* Cavium OCTEON II CN6[01]xx	MIPS64R2 */
#define	MIPS_CN71XX	0x96	/* Cavium OCTEON III CN7[01]xx	MIPS64R2 */
#define	MIPS_CN73XX	0x97	/* Cavium OCTEON III CN7[23]xx	MIPS64R2 */

/*
 * MIPS FPU types. Only soft, rest is the same as cpu type.
 */
#define	MIPS_SOFT	0x00	/* Software emulation		ISA I	*/


#if defined(_KERNEL) && !defined(_LOCORE)

extern register_t protosr;
extern int cpu_has_userlocal;

#ifdef FPUEMUL
#define	CPU_HAS_FPU(ci)	((ci)->ci_hw.c1prid != 0)
#else
#define	CPU_HAS_FPU(ci)	1
#endif

struct exec_package;
struct user;

void	tlb_asid_wrap(struct cpu_info *);
void	tlb_flush(int);
void	tlb_flush_addr(vaddr_t);
void	tlb_init(unsigned int);
int64_t	tlb_probe(vaddr_t);
void	tlb_set_gbase(vaddr_t, vsize_t);
void	tlb_set_page_mask(uint32_t);
void	tlb_set_pid(u_int);
void	tlb_set_wired(uint32_t);
int	tlb_update(vaddr_t, register_t);
void	tlb_update_indexed(vaddr_t, register_t, register_t, uint);

void	build_trampoline(vaddr_t, vaddr_t);
void	cpu_switchto_asm(struct proc *, struct proc *);
int	exec_md_map(struct proc *, struct exec_package *);
void	savectx(struct user *, int);

void	enable_fpu(struct proc *);
void	save_fpu(void);
int	fpe_branch_emulate(struct proc *, struct trapframe *, uint32_t,
	    vaddr_t);
void	MipsSaveCurFPState(struct proc *);
void	MipsSaveCurFPState16(struct proc *);
void	MipsSwitchFPState(struct proc *, struct trapframe *);
void	MipsSwitchFPState16(struct proc *, struct trapframe *);

int	guarded_read_1(paddr_t, uint8_t *);
int	guarded_read_2(paddr_t, uint16_t *);
int	guarded_read_4(paddr_t, uint32_t *);
int	guarded_write_4(paddr_t, uint32_t);

void	MipsFPTrap(struct trapframe *);
register_t MipsEmulateBranch(struct trapframe *, vaddr_t, uint32_t, uint32_t);

int	classify_insn(uint32_t);
#define	INSNCLASS_NEUTRAL	0
#define	INSNCLASS_CALL		1
#define	INSNCLASS_BRANCH	2

/*
 * R4000 end-of-page errata workaround routines
 */

extern int r4000_errata;
u_int	eop_page_check(paddr_t);
void	eop_tlb_flush_addr(struct pmap *, vaddr_t, u_long);
int	eop_tlb_miss_handler(struct trapframe *, struct cpu_info *,
	    struct proc *);
void	eop_cleanup(struct trapframe *, struct proc *);

/*
 * Low level access routines to CPU registers
 */

void	setsoftintr0(void);
void	clearsoftintr0(void);
void	setsoftintr1(void);
void	clearsoftintr1(void);
register_t enableintr(void);
register_t disableintr(void);
register_t getsr(void);
register_t setsr(register_t);

u_int	cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
uint32_t cp0_get_config_2(void);
uint32_t cp0_get_config_3(void);
uint32_t cp0_get_config_4(void);
uint32_t cp0_get_pagegrain(void);
register_t cp0_get_prid(void);
void	cp0_reset_cause(register_t);
void	cp0_set_compare(u_int);
void	cp0_set_config(register_t);
void	cp0_set_pagegrain(uint32_t);
void	cp0_set_trapbase(register_t);
u_int	cp1_get_prid(void);

/* Read coprocessor 0 register 7 (HWREna). */
static inline uint32_t
cp0_get_hwrena(void)
{
	uint32_t value;
	__asm__ volatile ("mfc0 %0, $7" : "=r" (value));
	return value;
}

/* Write coprocessor 0 register 7 (HWREna). */
static inline void
cp0_set_hwrena(uint32_t value)
{
	__asm__ volatile ("mtc0 %0, $7" : : "r" (value));
}

/* Write coprocessor 0 register 4 select 2 (UserLocal); needs mips64r2. */
static inline void
cp0_set_userlocal(void *value)
{
	__asm__ volatile (
	"	.set	push\n"
	"	.set	mips64r2\n"
	"	dmtc0	%0, $4, 2\n"
	"	.set	pop\n"
	: : "r" (value));
}

static inline u_long
intr_disable(void)
{
	return disableintr();
}

static inline void
intr_restore(u_long sr)
{
	setsr(sr);
}

/*
 * Cache routines (may be overridden)
 */

#ifndef	Mips_SyncCache
#define	Mips_SyncCache(ci) \
	((ci)->ci_SyncCache)(ci)
#endif
#ifndef	Mips_InvalidateICache
#define	Mips_InvalidateICache(ci, va, l) \
	((ci)->ci_InvalidateICache)(ci, va, l)
#endif
#ifndef	Mips_InvalidateICachePage
#define	Mips_InvalidateICachePage(ci, va) \
	((ci)->ci_InvalidateICachePage)(ci, va)
#endif
#ifndef	Mips_SyncICache
#define	Mips_SyncICache(ci) \
	((ci)->ci_SyncICache)(ci)
#endif
#ifndef	Mips_SyncDCachePage
#define	Mips_SyncDCachePage(ci, va, pa) \
	((ci)->ci_SyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCachePage
#define	Mips_HitSyncDCachePage(ci, va, pa) \
	((ci)->ci_HitSyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCache
#define	Mips_HitSyncDCache(ci, va, l) \
	((ci)->ci_HitSyncDCache)(ci, va, l)
#endif
#ifndef	Mips_HitInvalidateDCache
#define	Mips_HitInvalidateDCache(ci, va, l) \
	((ci)->ci_HitInvalidateDCache)(ci, va, l)
#endif
#ifndef	Mips_IOSyncDCache
#define	Mips_IOSyncDCache(ci, va, l, h) \
	((ci)->ci_IOSyncDCache)(ci, va, l, h)
#endif

#endif	/* _KERNEL && !_LOCORE */
#endif	/* !_MIPS64_CPU_H_ */