/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
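
/*
 * Illustrative sketch (a hypothetical helper, not part of this header):
 * CACHE_COLOUR() extracts the virtual-index bits above PAGE_SHIFT, so
 * two virtual mappings of the same page can alias in a VIVT or
 * aliasing-VIPT D-cache only when their colours are equal.
 */
static inline bool example_mappings_may_alias(unsigned long va1,
					      unsigned long va2)
{
	return CACHE_COLOUR(va1) == CACHE_COLOUR(va2);
}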

/*
 * MM Cache Management
 * ===================
 *
 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 * implement these methods.
 *
 * Start addresses are inclusive and end addresses are exclusive;
 * start addresses should be rounded down, end addresses up.
 *
 * See Documentation/core-api/cachetlb.rst for more information.
 * Please note that the implementation of these, and the required
 * effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards; falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 * DMA Cache Coherency
 * ===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif
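
/*
 * Illustrative sketch (a hypothetical helper): however the __cpuc_*
 * names are bound, callers use them identically.  Under MULTI_CACHE the
 * call below dispatches through cpu_cache.flush_kern_all (populated by
 * the CPU setup code at boot); otherwise it is a direct call into the
 * matching arch/arm/mm/cache-*.S entry point.
 */
static inline void example_flush_entire_cache(void)
{
	__cpuc_flush_kern_all();
}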

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
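
/*
 * Usage sketch (illustrative, hypothetical parameters): this is the
 * interface the generic ptrace/access_remote_vm path uses when it pokes
 * data into another task's page; routing the write through
 * copy_to_user_page() keeps the I-cache and D-cache coherent in case
 * the target is code.  "kaddr" is a kernel mapping of "page".
 */
static inline void example_remote_poke(struct vm_area_struct *vma,
				       struct page *page, unsigned long uaddr,
				       void *kaddr, const void *src,
				       unsigned long len)
{
	copy_to_user_page(vma, page, uaddr, kaddr, src, len);
}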

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
					vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_pages(vma, addr, pfn, nr) \
		vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
		unsigned long pfn, unsigned int nr);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)

/*
 * flush_icache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_icache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
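
/*
 * Usage sketch (illustrative, hypothetical "dst"/"len"): after writing
 * instructions through the kernel mapping, as a module loader or
 * kprobes-style code would, make them visible to instruction fetch:
 */
static inline void example_install_code(void *dst, size_t len)
{
	/* ... instructions have just been written to dst ... */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}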

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
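
/*
 * Usage sketch (illustrative, hypothetical "vptr"/"size"), following
 * Documentation/core-api/cachetlb.rst: a driver that does I/O on
 * memory it also accesses through a vmap() alias brackets the transfer
 * with these helpers.
 */
static inline void example_before_vmap_io(void *vptr, int size)
{
	/* push dirty lines in the vmap alias out so the device sees
	 * up-to-date data */
	flush_kernel_vmap_range(vptr, size);
}

static inline void example_after_vmap_io(void *vptr, int size)
{
	/* discard stale lines so the CPU re-reads what the device wrote */
	invalidate_kernel_vmap_range(vptr, size);
}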

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

#define flush_cache_vmap_early(start, end)	do { } while (0)

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non-cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
 * operation, is needed to avoid discarding possible concurrent writes to
 * the accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
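
/*
 * Usage sketch (illustrative, hypothetical type and names): a state
 * variable shared with a non-cached observer gets a writeback granule
 * of its own and is pushed out to main memory after every cached
 * update.
 */
struct example_shared_state {
	int	val;
} __aligned(__CACHE_WRITEBACK_GRANULE);

static inline void example_publish_state(struct example_shared_state *s,
					 int v)
{
	s->val = v;
	/* make the write visible to a non-cached reader */
	sync_cache_w(&s->val);
}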

/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and do so without any intervening memory access in between those
 * steps, not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (or similar) instructions may be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 *   The "level" argument is the suffix of the v7_flush_dcache_* variant
 *   to branch to, i.e. "all" or "louis".
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb" \
	: : : "r0","r1","r2","r3","r4","r5","r6", \
	      "r9","r10","ip","lr","memory")

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif

#endif