/* $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $ */

/*-
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 *
 * $FreeBSD: stable/11/sys/arm/include/cpufunc.h 338514 2018-09-06 22:23:39Z jhb $
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <machine/armreg.h>

static __inline void
breakpoint(void)
{
	__asm("udf 0xffff");
}

struct cpu_functions {

	/* CPU functions */
#if __ARM_ARCH < 6
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int bic, u_int eor);
	void	(*cf_setttb)		(u_int ttb);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(u_int va);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(u_int va);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	ID-cache Invalidate All:
	 *		Unlike other functions, this one must never write back.
	 *		It is used to initialize the MMU when it is in an
	 *		unknown state (such as when it may have lines tagged
	 *		as valid that belong to a previous set of mappings).
	 *
	 *	I-cache Sync range:
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_range)	(vm_offset_t, vm_size_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_dcache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_dcache_wb_range)	(vm_offset_t, vm_size_t);

	void	(*cf_idcache_inv_all)	(void);
	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
#endif
	void	(*cf_l2cache_wbinv_all)	(void);
	void	(*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
	void	(*cf_l2cache_inv_range)	(vm_offset_t, vm_size_t);
	void	(*cf_l2cache_wb_range)	(vm_offset_t, vm_size_t);
	void	(*cf_l2cache_drain_writebuf) (void);

	/* Other functions */

#if __ARM_ARCH < 6
	void	(*cf_drain_writebuf)	(void);
#endif

	void	(*cf_sleep)		(int mode);

#if __ARM_ARCH < 6
	/* Soft functions */

	void	(*cf_context_switch)	(void);
#endif

	void	(*cf_setup)		(void);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#if __ARM_ARCH < 6
#define	cpu_cpwait()		cpufuncs.cf_cpwait()
#endif

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#if __ARM_ARCH < 6
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_inv_all()	cpufuncs.cf_idcache_inv_all()
#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
#endif
#define	cpu_l2cache_wbinv_all()	cpufuncs.cf_l2cache_wbinv_all()
#define	cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
#define	cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
#define	cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
#define	cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()

#if __ARM_ARCH < 6
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#endif
#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_setup()		cpufuncs.cf_setup()
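
/*
 * Example (an illustrative sketch, not part of the original header): on
 * pre-ARMv6 configurations, where the cpu_*cache_* macros above are
 * defined, code that writes instructions into memory would typically
 * write the D-cache back and then synchronize the I-cache over the same
 * range, per the I-cache Sync rule documented above:
 *
 *	cpu_dcache_wb_range(va, len);
 *	cpu_icache_sync_range(va, len);
 *
 * where "va" and "len" (hypothetical names) describe the written range.
 */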
int	set_cpufuncs		(void);
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
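
/*
 * Illustrative sketch (not from the original header), assuming
 * set_cpufuncs() returns 0 once a matching function table has been
 * installed: early machine-dependent startup code would select the
 * table and then run the CPU-specific setup hook, e.g.:
 *
 *	if (set_cpufuncs() != 0)
 *		panic("no support for this CPU type");
 *	cpu_setup();
 */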
void	cpufunc_nullop		(void);
u_int	cpu_ident		(void);
u_int	cpufunc_control		(u_int clear, u_int bic);
void	cpu_domains		(u_int domains);
u_int	cpu_faultstatus		(void);
u_int	cpu_faultaddress	(void);
u_int	cpu_get_control		(void);
u_int	cpu_pfr			(int);

#if defined(CPU_FA526)
void	fa526_setup		(void);
void	fa526_setttb		(u_int ttb);
void	fa526_context_switch	(void);
void	fa526_cpu_sleep		(int);
void	fa526_tlb_flushID_SE	(u_int);

void	fa526_icache_sync_range(vm_offset_t start, vm_size_t end);
void	fa526_dcache_wbinv_all	(void);
void	fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t end);
void	fa526_dcache_inv_range	(vm_offset_t start, vm_size_t end);
void	fa526_dcache_wb_range	(vm_offset_t start, vm_size_t end);
void	fa526_idcache_wbinv_all(void);
void	fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
#endif


#if defined(CPU_ARM9) || defined(CPU_ARM9E)
void	arm9_setttb		(u_int);
void	arm9_tlb_flushID_SE	(u_int va);
void	arm9_context_switch	(void);
#endif

#if defined(CPU_ARM9)
void	arm9_icache_sync_range	(vm_offset_t, vm_size_t);

void	arm9_dcache_wbinv_all	(void);
void	arm9_dcache_wbinv_range	(vm_offset_t, vm_size_t);
void	arm9_dcache_inv_range	(vm_offset_t, vm_size_t);
void	arm9_dcache_wb_range	(vm_offset_t, vm_size_t);

void	arm9_idcache_wbinv_all	(void);
void	arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	arm9_setup		(void);

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#if defined(CPU_ARM9E)
void	arm10_setup		(void);

u_int	sheeva_control_ext	(u_int, u_int);
void	sheeva_cpu_sleep	(int);
void	sheeva_setttb		(u_int);
void	sheeva_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	sheeva_dcache_inv_range	(vm_offset_t, vm_size_t);
void	sheeva_dcache_wb_range	(vm_offset_t, vm_size_t);
void	sheeva_idcache_wbinv_range (vm_offset_t, vm_size_t);

void	sheeva_l2cache_wbinv_range (vm_offset_t, vm_size_t);
void	sheeva_l2cache_inv_range (vm_offset_t, vm_size_t);
void	sheeva_l2cache_wb_range	(vm_offset_t, vm_size_t);
void	sheeva_l2cache_wbinv_all (void);
#endif

#if defined(CPU_MV_PJ4B)
void	armv6_idcache_wbinv_all	(void);
#endif
#if defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
void	armv7_setttb		(u_int);
void	armv7_tlb_flushID	(void);
void	armv7_tlb_flushID_SE	(u_int);
void	armv7_icache_sync_range	(vm_offset_t, vm_size_t);
void	armv7_idcache_wbinv_range (vm_offset_t, vm_size_t);
void	armv7_idcache_inv_all	(void);
void	armv7_dcache_wbinv_all	(void);
void	armv7_idcache_wbinv_all	(void);
void	armv7_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	armv7_dcache_inv_range	(vm_offset_t, vm_size_t);
void	armv7_dcache_wb_range	(vm_offset_t, vm_size_t);
void	armv7_cpu_sleep		(int);
void	armv7_setup		(void);
void	armv7_context_switch	(void);
void	armv7_drain_writebuf	(void);
u_int	armv7_auxctrl		(u_int, u_int);

void	armadaxp_idcache_wbinv_all (void);

void	cortexa_setup		(void);
#endif
#if defined(CPU_MV_PJ4B)
void	pj4b_config		(void);
void	pj4bv7_setup		(void);
#endif

#if defined(CPU_ARM1176)
void	arm11_tlb_flushID	(void);
void	arm11_tlb_flushID_SE	(u_int);
void	arm11_tlb_flushD	(void);
void	arm11_tlb_flushD_SE	(u_int va);

void	arm11_context_switch	(void);

void	arm11_drain_writebuf	(void);

void	armv6_dcache_wbinv_range (vm_offset_t, vm_size_t);
void	armv6_dcache_inv_range	(vm_offset_t, vm_size_t);
void	armv6_dcache_wb_range	(vm_offset_t, vm_size_t);

void	armv6_idcache_inv_all	(void);

void	arm11x6_setttb		(u_int);
void	arm11x6_idcache_wbinv_all (void);
void	arm11x6_dcache_wbinv_all (void);
void	arm11x6_icache_sync_range (vm_offset_t, vm_size_t);
void	arm11x6_idcache_wbinv_range (vm_offset_t, vm_size_t);
void	arm11x6_setup		(void);
void	arm11x6_sleep		(int);	/* no ref. for errata */
#endif

#if defined(CPU_ARM9E)
void	armv5_ec_setttb(u_int);

void	armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);

void	armv5_ec_dcache_wbinv_all(void);
void	armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
void	armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);

void	armv5_ec_idcache_wbinv_all(void);
void	armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM9E) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_XSCALE_81342)

void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int va);

void	armv4_drain_writebuf	(void);
void	armv4_idcache_inv_all	(void);
#endif

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_XSCALE_81342)
void	xscale_cpwait		(void);

void	xscale_cpu_sleep	(int mode);

u_int	xscale_control		(u_int clear, u_int bic);

void	xscale_setttb		(u_int ttb);

void	xscale_tlb_flushID_SE	(u_int va);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int entry);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int entry);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int entry);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int entry);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void	xscale_cache_cleanD_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void	xscale_cache_purgeD_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_syncI_rng	(vm_offset_t start, vm_size_t end);
void	xscale_cache_flushD_rng	(vm_offset_t start, vm_size_t end);

void	xscale_context_switch	(void);

void	xscale_setup		(void);
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */

#ifdef CPU_XSCALE_81342

void	xscalec3_l2cache_purge	(void);
void	xscalec3_cache_purgeID	(void);
void	xscalec3_cache_purgeD	(void);
void	xscalec3_cache_cleanID	(void);
void	xscalec3_cache_cleanD	(void);
void	xscalec3_cache_syncI	(void);

void	xscalec3_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_cache_syncI_rng (vm_offset_t start, vm_size_t end);

void	xscalec3_l2cache_flush_rng (vm_offset_t, vm_size_t);
void	xscalec3_l2cache_clean_rng (vm_offset_t start, vm_size_t end);
void	xscalec3_l2cache_purge_rng (vm_offset_t start, vm_size_t end);


void	xscalec3_setttb		(u_int ttb);
void	xscalec3_context_switch	(void);

#endif /* CPU_XSCALE_81342 */

/*
 * Macros for manipulating CPU interrupts
 */
#if __ARM_ARCH < 6
#define	__ARM_INTR_BITS		(PSR_I | PSR_F)
#else
#define	__ARM_INTR_BITS		(PSR_I | PSR_F | PSR_A)
#endif

static __inline uint32_t
__set_cpsr(uint32_t bic, uint32_t eor)
{
	uint32_t tmp, ret;

	__asm __volatile(
		"mrs	%0, cpsr\n"	/* Get the CPSR */
		"bic	%1, %0, %2\n"	/* Clear bits */
		"eor	%1, %1, %3\n"	/* XOR bits */
		"msr	cpsr_xc, %1\n"	/* Set the CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS));
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{

	return (__set_cpsr(mask & __ARM_INTR_BITS, 0));
}

static __inline uint32_t
restore_interrupts(uint32_t old_cpsr)
{

	return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS));
}

static __inline register_t
intr_disable(void)
{

	return (disable_interrupts(PSR_I | PSR_F));
}

static __inline void
intr_restore(register_t s)
{

	restore_interrupts(s);
}
#undef __ARM_INTR_BITS
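
/*
 * Usage sketch (illustrative, not from the original header): each of the
 * functions above returns the previous CPSR value, so the usual pattern
 * is to save the old state, do the critical work, and then restore:
 *
 *	uint32_t s;
 *
 *	s = disable_interrupts(PSR_I | PSR_F);
 *	... code that must not be interrupted ...
 *	restore_interrupts(s);
 *
 * or, equivalently, via the wrappers:
 *
 *	register_t s = intr_disable();
 *	...
 *	intr_restore(s);
 */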
/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	(u_int mode, u_int address);
u_int	get_stackptr	(u_int mode);

/*
 * Miscellany
 */

int	get_pc_str_offset	(void);

/*
 * CPU functions from locore.S
 */

void	cpu_reset	(void) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

extern u_int	arm_cache_level;
extern u_int	arm_cache_loc;
extern u_int	arm_cache_type[14];

#else	/* !_KERNEL */

static __inline void
breakpoint(void)
{

	/*
	 * This matches the instruction used by GDB for software
	 * breakpoints.
	 */
	__asm("udf 0xfdee");
}

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */

/* End of cpufunc.h */