/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/arm/include/cpu-v6.h 328966 2018-02-07 06:27:29Z mmel $
 */
#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user serviceable parts here, they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#endif

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#if __ARM_ARCH < 6
#error Only include this file for ARMv6
#else

#define	CPU_ASID_KERNEL		0

void dcache_wbinv_poc_all(void);	/* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define	PMU_OVSR_C		0x80000000	/* Cycle Counter */
extern uint32_t	ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define	sev()	__asm __volatile("sev" : : : "memory")
#define	wfe()	__asm __volatile("wfe" : : : "memory")

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define	_FX(s...)	#s

#define	_RF0(fname, aname...)					\
static __inline uint32_t					\
fname(void)							\
{								\
	uint32_t reg;						\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));	\
	return(reg);						\
}

#define	_R64F0(fname, aname)					\
static __inline uint64_t					\
fname(void)							\
{								\
	uint64_t reg;						\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));	\
	return(reg);						\
}

#define	_WF0(fname, aname...)					\
static __inline void						\
fname(void)							\
{								\
	__asm __volatile("mcr\t" _FX(aname));			\
}

#define	_WF1(fname, aname...)					\
static __inline void						\
fname(uint32_t reg)						\
{								\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));	\
}

#define	_W64F1(fname, aname...)					\
static __inline void						\
fname(uint64_t reg)						\
{								\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));	\
}
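
/*
 * Illustration only (editorial note, not part of the original header): each
 * invocation of the generators above emits one small inline accessor.
 * Assuming the usual definition of CP15_SCTLR in <machine/sysreg.h>
 * ("p15, 0, rr, c1, c0, 0"), an invocation such as
 *
 *	_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
 *
 * expands to roughly:
 *
 *	static __inline uint32_t
 *	cp15_sctlr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\t" "p15, 0, %0, c1, c0, 0" : "=r" (reg));
 *		return(reg);
 *	}
 */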

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif
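
/*
 * Example (editorial sketch only, not part of the original header): starting
 * the cycle counter with the ARMv7 accessors generated above.  The bit
 * positions used (PMCR.E is bit 0, the cycle-counter enable is bit 31, the
 * same position as PMU_OVSR_C) follow the ARMv7 PMU description and should
 * be checked against the TRM of the target core.
 */
#if 0
static __inline void
pmu_ccnt_start_example(void)
{

	cp15_pmcr_set(cp15_pmcr_get() | 0x1);	/* PMCR.E: enable counters */
	cp15_pmcnten_set(0x80000000);		/* PMCNTENSET.C: cycle counter */
}
#endif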

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all not global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined SMP

static __inline void
tlb_flush_all(void)
{

	dsb();
	_CP15_TLBIALLIS();
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVAAIS(va);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVAAIS(va);
	dsb();
}
#else /* SMP */

#define	tlb_flush_all()			tlb_flush_all_local()
#define	tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define	tlb_flush(va)			tlb_flush_local(va)
#define	tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* SMP */
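
/*
 * Usage sketch (editorial illustration only, not part of the original
 * header): after a kernel mapping is changed, the corresponding TLB entries
 * have to be invalidated; the helpers above wrap the CP15 operations with
 * the required barriers.  pte_store() is a hypothetical placeholder for the
 * actual page table update done by the pmap code.
 */
#if 0
static __inline void
example_remap_page(pt_entry_t *ptep, pt_entry_t npte, vm_offset_t va)
{

	pte_store(ptep, npte);	/* update the page table entry */
	tlb_flush(va);		/* broadcast flush on SMP, local otherwise */
}
#endif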

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_ICIALLUIS();
#else
	_CP15_ICIALLU();
#endif
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_ICIALLUIS();
#else
	_CP15_ICIALLU();
#endif
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_BPIALLIS();
#else
	_CP15_BPIALL();
#endif
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction.  Within the given range, if there was no dirty
 * cacheline in any cache before, no stale cacheline should remain in them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating.  This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new
 * data into memory.  The concern in this case is that dirty lines are not
 * evicted to main memory, overwriting the DMA data.  For that reason, the
 * L1 is done first to ensure that an evicted L1 line doesn't flow to L2
 * after the L2 has been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}
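
/*
 * Usage sketch (editorial illustration only, not part of the original
 * header): for a device-to-memory DMA transfer on a non-coherent buffer,
 * stale lines are discarded with dcache_inv_poc_dma() before the transfer
 * starts and the range is invalidated again with dcache_inv_poc() once the
 * device has finished, so that lines fetched while the DMA was in flight do
 * not hide the new data.  The buffer names below are hypothetical; busdma
 * normally performs these steps via bus_dmamap_sync().
 */
#if 0
static void
example_dma_from_device(vm_offset_t buf_va, vm_paddr_t buf_pa, vm_size_t len)
{

	dcache_inv_poc_dma(buf_va, buf_pa, len);	/* before starting DMA */
	/* ... program the device and wait for completion ... */
	dcache_inv_poc(buf_va, buf_pa, len);		/* after DMA completes */
}
#endif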

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction.  Within the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent
 * result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
#endif /* !__ARM_ARCH < 6 */

#endif /* !MACHINE_CPU_V6_H */