/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/11.0/sys/arm/include/cpu-v6.h 300694 2016-05-25 19:44:26Z ian $
 */
#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user serviceable parts here; they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#endif

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#if __ARM_ARCH < 6
#error Only include this file for ARMv6
#else

#define CPU_ASID_KERNEL 0

void dcache_wbinv_poc_all(void);	/* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define PMU_OVSR_C	0x80000000	/* Cycle Counter */
extern uint32_t ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...) \
static __inline register_t \
fname(void) \
{ \
	register_t reg; \
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \
	return (reg); \
}

#define _R64F0(fname, aname) \
static __inline uint64_t \
fname(void) \
{ \
	uint64_t reg; \
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \
	return (reg); \
}

#define _WF0(fname, aname...) \
static __inline void \
fname(void) \
{ \
	__asm __volatile("mcr\t" _FX(aname)); \
}

#define _WF1(fname, aname...) \
static __inline void \
fname(register_t reg) \
{ \
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \
}

#define _W64F1(fname, aname...) \
static __inline void \
fname(uint64_t reg) \
{ \
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \
}
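/*
 * Example of the generated code (illustrative only, not part of the original
 * header): assuming <machine/sysreg.h> encodes MIDR as "p15, 0, %0, c0, c0, 0",
 * an invocation such as
 *
 *	_RF0(cp15_midr_get, CP15_MIDR(%0))
 *
 * expands to
 *
 *	static __inline register_t
 *	cp15_midr_get(void)
 *	{
 *		register_t reg;
 *		__asm __volatile("mrc\t" "p15, 0, %0, c0, c0, 0": "=r" (reg));
 *		return (reg);
 *	}
 *
 * i.e. _FX() stringifies the coprocessor operand list from sysreg.h and the
 * compiler substitutes the output register for %0.
 */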
/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate by MVA PoU */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
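/*
 * Example (illustrative sketch, not part of the original header): the
 * generated accessors read like ordinary functions.  The architectural
 * SCTLR bit positions used here are M (bit 0, MMU enable) and C (bit 2,
 * data cache enable).
 */
#if 0
static __inline int
example_mmu_and_dcache_enabled(void)
{
	uint32_t sctlr = cp15_sctlr_get();

	return ((sctlr & 0x1) != 0 && (sctlr & 0x4) != 0);
}
#endif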
/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif
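/*
 * Example (illustrative sketch, not part of the original header): counting
 * cycles with the ARMv7 (__ARM_ARCH > 6) accessors above.  PMCR.E is
 * architecturally bit 0, and bit 31 of PMCNTENSET selects the cycle
 * counter, the same bit position PMU_OVSR_C names for the overflow
 * status register.
 */
#if 0
static __inline uint32_t
example_read_cycle_counter(void)
{

	cp15_pmcr_set(cp15_pmcr_get() | 0x1);	/* PMCR.E: enable counters */
	cp15_pmcnten_set(0x80000000);		/* bit 31: cycle counter on */
	return (cp15_pmccntr_get());		/* current PMCCNTR value */
}
#endif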
_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

#undef _FX
#undef _RF0
#undef _R64F0
#undef _WF0
#undef _WF1
#undef _W64F1

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush a single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush a range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined SMP

static __inline void
tlb_flush_all(void)
{

	dsb();
	_CP15_TLBIALLIS();
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVAAIS(va);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVAAIS(va);
	dsb();
}
#else /* !SMP */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* SMP */
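/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that rewrites kernel PTEs must flush the stale translations afterwards.
 * tlb_flush_range() resolves to the broadcasting (inner shareable) form on
 * ARMv7 SMP kernels and to the strictly local form otherwise; both require
 * page-aligned arguments, as the KASSERTs above enforce.
 */
#if 0
static __inline void
example_update_mapping(vm_offset_t va, vm_size_t size)
{

	/* ... modify the page table entries covering [va, va + size) ... */
	tlb_flush_range(va, size);	/* discard stale TLB entries */
}
#endif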
/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_ICIALLUIS();
#else
	_CP15_ICIALLU();
#endif
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_ICIALLUIS();
#else
	_CP15_ICIALLU();
#endif
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_BPIALLIS();
#else
	_CP15_BPIALL();
#endif
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction. In the given range, if there was no dirty cacheline
 * in any cache before, no stale cacheline should remain in them after this
 * operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating. This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new
 * data into memory. The concern in this case is ensuring that dirty lines
 * are not evicted to main memory, overwriting the DMA data. For that
 * reason, the L1 is done first to ensure that an evicted L1 line doesn't
 * flow to L2 after the L2 has been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction. In the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}
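/*
 * Example (illustrative sketch, not part of the original header): a typical
 * DMA round trip built from the PoC operations above.  Dirty lines must
 * reach main memory before a device reads the buffer, and stale lines must
 * be discarded before the CPU reads data a device has written.
 */
#if 0
static __inline void
example_dma_prepare(vm_offset_t va, vm_paddr_t pa, vm_size_t size, int writing)
{

	if (writing) {
		/* CPU -> device: push CPU-written data out to memory. */
		dcache_wb_poc(va, pa, size);
	} else {
		/*
		 * Device -> CPU: discard lines the device will overwrite;
		 * dcache_inv_poc_dma() does L1 before L2 on purpose.
		 */
		dcache_inv_poc_dma(va, pa, size);
	}
}
#endif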
/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent
 * result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
#endif /* __ARM_ARCH >= 6 */

#endif /* !MACHINE_CPU_V6_H */