/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user-serviceable parts here; they may change without notice. */
#ifndef _KERNEL
#error Only include this file in the kernel
#endif

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#if __ARM_ARCH < 6
#error Only include this file for ARMv6
#endif

/*
 * Some kernel modules (all of dtrace, for example) are compiled
 * unconditionally with -DSMP.  Although it looks like a bug, handle this
 * case here and in the #elif condition of the ARM_SMP_UP macro.
 */
#if __ARM_ARCH <= 6 && defined(SMP) && !defined(KLD_MODULE)
#error SMP option is not supported on ARMv6
#endif

#if __ARM_ARCH <= 6 && defined(SMP_ON_UP)
#error SMP_ON_UP option is only supported on ARMv7+ CPUs
#endif

#if !defined(SMP) && defined(SMP_ON_UP)
#error SMP option must be defined for SMP_ON_UP option
#endif

#define CPU_ASID_KERNEL 0

#if defined(SMP_ON_UP)
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	if (cpuinfo.mp_ext != 0) {				\
		smp_code;					\
	} else {						\
		up_code;					\
	}							\
} while (0)
#elif defined(SMP) && __ARM_ARCH > 6
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	smp_code;						\
} while (0)
#else
#define ARM_SMP_UP(smp_code, up_code)				\
do {								\
	up_code;						\
} while (0)
#endif

void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define	PMU_OVSR_C	0x80000000	/* Cycle Counter */
extern uint32_t ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define sev() __asm __volatile("sev" : : : "memory")
#define wfe() __asm __volatile("wfe" : : : "memory")
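
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical, not
 * part of this header's API): ARM_SMP_UP is meant to pair a broadcasting
 * (Inner Shareable) CP15 operation with its local-CPU counterpart.  The
 * sketch mirrors the bpb_inv_all() helper defined later in this file; the
 * _CP15_BPIALL*() primitives it uses are also defined further below.
 */
#if 0	/* usage sketch only */
static __inline void
example_bpb_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_BPIALLIS(),	/* SMP: broadcast to Inner Shareable domain */
	    _CP15_BPIALL()	/* UP: this CPU only */
	);
	dsb();
	isb();
}
#endif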
/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...)					\
static __inline uint32_t					\
fname(void)							\
{								\
	uint32_t reg;						\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));	\
	return (reg);						\
}

#define _R64F0(fname, aname)					\
static __inline uint64_t					\
fname(void)							\
{								\
	uint64_t reg;						\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));	\
	return (reg);						\
}

#define _WF0(fname, aname...)					\
static __inline void						\
fname(void)							\
{								\
	__asm __volatile("mcr\t" _FX(aname));			\
}

#define _WF1(fname, aname...)					\
static __inline void						\
fname(uint32_t reg)						\
{								\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));	\
}

#define _W64F1(fname, aname...)					\
static __inline void						\
fname(uint64_t reg)						\
{								\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));	\
}
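
/*
 * For illustration, an accessor generated by the macros above, e.g.
 *
 *	_RF0(cp15_midr_get, CP15_MIDR(%0))
 *
 * expands to roughly the following (assuming <machine/sysreg.h> defines
 * CP15_MIDR(rr) as "p15, 0, rr, c0, c0, 0"; see that header for the
 * authoritative register encodings):
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 */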
/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate by MVA PoU */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrprw_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrprw_set, CP15_TPIDRPRW(%0))
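
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical):
 * starting and reading the ARMv7 cycle counter via the accessors above.
 * Bit 0 of PMCR is the global counter enable (PMCR.E); bit 31 of
 * PMCNTENSET enables the cycle counter and matches the PMU_OVSR_C bit
 * defined earlier in this file.
 */
#if 0	/* usage sketch only, ARMv7 */
static __inline uint32_t
example_ccnt_start_and_read(void)
{

	cp15_pmcr_set(cp15_pmcr_get() | 0x1);	/* PMCR.E: enable counters */
	cp15_pmcnten_set(0x80000000);		/* enable the cycle counter */
	isb();
	return (cp15_pmccntr_get());
}
#endif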
/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

#undef _FX
#undef _RF0
#undef _R64F0
#undef _WF0
#undef _WF1
#undef _W64F1
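
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical):
 * converting the 64-bit physical counter to nanoseconds with the generic
 * timer accessors above.  Assumes firmware has programmed CNTFRQ and the
 * system counter is running; the naive scaling below can overflow for
 * long uptimes and is for illustration only.
 */
#if 0	/* usage sketch only */
static __inline uint64_t
example_cntpct_ns(void)
{
	uint64_t ticks;
	uint32_t freq;

	ticks = cp15_cntpct_get();
	freq = cp15_cntfrq_get();
	return (ticks * 1000000000ULL / freq);
}
#endif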
/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined(SMP)

static __inline void
tlb_flush_all(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIALLIS(),
	    _CP15_TLBIALL()
	);
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIASIDIS(CPU_ASID_KERNEL),
	    _CP15_TLBIASID(CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIMVAAIS(va),
	    _CP15_TLBIMVA(va | CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	ARM_SMP_UP(
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVAAIS(va);
	    },
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	    }
	);
	dsb();
}
#else /* __ARM_ARCH < 7 || !SMP */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* __ARM_ARCH < 7 || !SMP */

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;

	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_BPIALLIS(),
	    _CP15_BPIALL()
	);
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction.  In the given range, if there was no dirty
 * cacheline in any cache before, no stale cacheline should remain in them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}
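
/*
 * Illustrative sketch (guarded out; the helper name is hypothetical):
 * typical cache maintenance around a DMA transfer, in the spirit of what
 * busdma does.  Before a device reads a buffer, dirty lines are pushed to
 * the Point of Coherency with dcache_wb_poc() (defined below); after a
 * device has written a buffer, stale lines are discarded with
 * dcache_inv_poc() above so the CPU sees the new data.
 */
#if 0	/* usage sketch only */
static __inline void
example_dma_sync(vm_offset_t va, vm_paddr_t pa, vm_size_t size,
    int device_wrote)
{

	if (device_wrote)
		dcache_inv_poc(va, pa, size);	/* discard stale CPU copies */
	else
		dcache_wb_poc(va, pa, size);	/* push dirty lines to memory */
}
#endif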
/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating.  This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new
 * data into memory.  The concern in this case is that dirty lines are not
 * evicted to main memory, overwriting the DMA data.  For that reason, the
 * L1 is done first to ensure that an evicted L1 line doesn't flow to L2
 * after the L2 has already been invalidated.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction.  In the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent
 * result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

#endif /* !MACHINE_CPU_V6_H */