/* cpufunc.h revision 1.24 */
/*	$OpenBSD: cpufunc.h,v 1.24 2016/03/22 23:35:01 patrick Exp $	*/
/*	$NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM_CPUFUNC_H_
#define _ARM_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <arm/cpuconf.h>

/*
 * Vector of CPU-model-specific operations.  The cpu_*() macros below
 * dispatch through the global `cpufuncs' instance of this struct.
 */
struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int bic, u_int eor);
	void	(*cf_domains)		(u_int domains);
	void	(*cf_setttb)		(u_int ttb);
	u_int	(*cf_dfsr)		(void);
	u_int	(*cf_dfar)		(void);
	u_int	(*cf_ifsr)		(void);
	u_int	(*cf_ifar)		(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(u_int va);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(u_int va);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(u_int va);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range) (vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range) (vaddr_t, vsize_t);

	/* Secondary (outer) cache; sdcache ops also take the physical address. */
	void	(*cf_sdcache_wbinv_all)	(void);
	void	(*cf_sdcache_wbinv_range) (vaddr_t, paddr_t, vsize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, vsize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */
	void	(*cf_context_switch)	(u_int);
	void	(*cf_setup)		(void);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define cpu_id()		cpufuncs.cf_id()
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define cpu_dfsr()		cpufuncs.cf_dfsr()
#define cpu_dfar()		cpufuncs.cf_dfar()
#define cpu_ifsr()		cpufuncs.cf_ifsr()
#define cpu_ifar()		cpufuncs.cf_ifar()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

/* An sdcache is present iff the wbinv_all slot is not the no-op stub. */
#define	cpu_sdcache_enabled()	(cpufuncs.cf_sdcache_wbinv_all != cpufunc_nullop)
#define	cpu_sdcache_wbinv_all()	cpufuncs.cf_sdcache_wbinv_all()
#define	cpu_sdcache_wbinv_range(va, pa, s) cpufuncs.cf_sdcache_wbinv_range((va), (pa), (s))
#define	cpu_sdcache_inv_range(va, pa, s) cpufuncs.cf_sdcache_inv_range((va), (pa), (s))
#define	cpu_sdcache_wb_range(va, pa, s) cpufuncs.cf_sdcache_wb_range((va), (pa), (s))

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_context_switch(a)	cpufuncs.cf_context_switch(a)
/*
 * NOTE(review): cf_setup is declared (void) but this macro forwards `a';
 * presumably callers invoke cpu_setup() with an empty argument — confirm
 * against call sites before changing the macro's arity.
 */
#define cpu_setup(a)		cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		(void);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int clear, u_int bic);
void	cpufunc_domains		(u_int domains);
u_int	cpufunc_dfsr		(void);
u_int	cpufunc_dfar		(void);
u_int	cpufunc_ifsr		(void);
u_int	cpufunc_ifar		(void);

#ifdef CPU_ARMv7
void	armv7_setttb		(u_int);

void	armv7_tlb_flushID_SE	(u_int);
void	armv7_tlb_flushI_SE	(u_int);

void	armv7_context_switch	(u_int);

void	armv7_setup		(void);
void	armv7_tlb_flushID	(void);
void	armv7_tlb_flushI	(void);
void	armv7_tlb_flushD	(void);
void	armv7_tlb_flushD_SE	(u_int va);

void	armv7_drain_writebuf	(void);
void	armv7_cpu_sleep		(int mode);

u_int	armv7_periphbase	(void);

void	armv7_icache_sync_all	(void);
void	armv7_icache_sync_range	(vaddr_t, vsize_t);

void	armv7_dcache_wbinv_all	(void);
void	armv7_dcache_wbinv_range (vaddr_t, vsize_t);
void	armv7_dcache_inv_range	(vaddr_t, vsize_t);
void	armv7_dcache_wb_range	(vaddr_t, vsize_t);

void	armv7_idcache_wbinv_all	(void);
void	armv7_idcache_wbinv_range (vaddr_t, vsize_t);

extern unsigned armv7_dcache_sets_max;
extern unsigned armv7_dcache_sets_inc;
extern unsigned armv7_dcache_index_max;
extern unsigned armv7_dcache_index_inc;
#endif


#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_PXA2X0)
void	armv4_tlb_flushID	(void);
void	armv4_tlb_flushI	(void);
void	armv4_tlb_flushD	(void);
void	armv4_tlb_flushD_SE	(u_int va);

void	armv4_drain_writebuf	(void);
#endif

#if defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0) || (ARM_MMU_XSCALE == 1)
void	xscale_cpwait		(void);

void	xscale_cpu_sleep	(int mode);

u_int	xscale_control		(u_int clear, u_int bic);

void	xscale_setttb		(u_int ttb);

void	xscale_tlb_flushID_SE	(u_int va);

void	xscale_cache_flushID	(void);
void	xscale_cache_flushI	(void);
void	xscale_cache_flushD	(void);
void	xscale_cache_flushD_SE	(u_int entry);

void	xscale_cache_cleanID	(void);
void	xscale_cache_cleanD	(void);
void	xscale_cache_cleanD_E	(u_int entry);

void	xscale_cache_clean_minidata (void);

void	xscale_cache_purgeID	(void);
void	xscale_cache_purgeID_E	(u_int entry);
void	xscale_cache_purgeD	(void);
void	xscale_cache_purgeD_E	(u_int entry);

void	xscale_cache_syncI	(void);
void	xscale_cache_cleanID_rng (vaddr_t start, vsize_t end);
void	xscale_cache_cleanD_rng	(vaddr_t start, vsize_t end);
void	xscale_cache_purgeID_rng (vaddr_t start, vsize_t end);
void	xscale_cache_purgeD_rng	(vaddr_t start, vsize_t end);
void	xscale_cache_syncI_rng	(vaddr_t start, vsize_t end);
void	xscale_cache_flushD_rng	(vaddr_t start, vsize_t end);

void	xscale_context_switch	(u_int);

void	xscale_setup		(void);
#endif	/* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 */

#define tlb_flush	cpu_tlb_flushID
#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf

/*
 * Macros for manipulating CPU interrupts
 */
/* Functions to manipulate the CPSR. */
static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor);
static __inline u_int32_t __get_cpsr(void);

/*
 * Read-modify-write the CPSR control field: clear the bits in `bic',
 * then XOR in the bits in `eor'.  Returns the previous CPSR value.
 */
static __inline u_int32_t
__set_cpsr_c(u_int bic, u_int eor)
{
	u_int32_t tmp, ret;

	__asm volatile(
		"mrs %0, cpsr\n\t"	/* Get the CPSR */
		"bic %1, %0, %2\n\t"	/* Clear bits */
		"eor %1, %1, %3\n\t"	/* XOR bits */
		"msr cpsr_c, %1"	/* Set CPSR control field */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor));

	return ret;
}

/* Return the current CPSR value. */
static __inline u_int32_t
__get_cpsr(void)
{
	u_int32_t ret;

	__asm volatile("mrs %0, cpsr" : "=&r" (ret));

	return ret;
}

#define disable_interrupts(mask)					\
	(__set_cpsr_c((mask) & (PSR_I | PSR_F),				\
		      (mask) & (PSR_I | PSR_F)))

#define enable_interrupts(mask)						\
	(__set_cpsr_c((mask) & (PSR_I | PSR_F), 0))

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((PSR_I | PSR_F), (old_cpsr) & (PSR_I | PSR_F)))

/*
 * Functions to manipulate cpu r13
 * (in arm/arm/setstack.S)
 */

void set_stackptr	(u_int mode, u_int address);
u_int get_stackptr	(u_int mode);

/*
 * Miscellany
 */

int get_pc_str_offset	(void);

/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

#endif	/* _KERNEL */
#endif	/* _ARM_CPUFUNC_H_ */

/* End of cpufunc.h */