cpu-v6.h revision 294740
/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm/include/cpu-v6.h 294740 2016-01-25 18:02:28Z zbb $
 */
#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user serviceable parts here, they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#else

#include <machine/acle-compat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#define CPU_ASID_KERNEL 0

vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...) \
static __inline register_t \
fname(void) \
{ \
        register_t reg; \
        __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \
        return(reg); \
}

#define _R64F0(fname, aname) \
static __inline uint64_t \
fname(void) \
{ \
        uint64_t reg; \
        __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \
        return(reg); \
}

#define _WF0(fname, aname...) \
static __inline void \
fname(void) \
{ \
        __asm __volatile("mcr\t" _FX(aname)); \
}

#define _WF1(fname, aname...) \
static __inline void \
fname(register_t reg) \
{ \
        __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \
}

#define _W64F1(fname, aname...) \
static __inline void \
fname(uint64_t reg) \
{ \
        __asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \
}

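/*
 * For illustration only (not part of the original header): the generators
 * above expand into tiny inline accessors around single mrc/mcr instructions.
 * Assuming the CP15_MIDR() macro from <machine/sysreg.h> expands to the usual
 * "p15, 0, %0, c0, c0, 0" operand list, _RF0(cp15_midr_get, CP15_MIDR(%0))
 * produces an accessor roughly equivalent to the hand-written sketch below;
 * the name example_midr_read is hypothetical.
 */
static __inline register_t
example_midr_read(void)
{
        register_t reg;

        /* Read the Main ID Register via a single CP15 mrc instruction. */
        __asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
        return (reg);
}
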
/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)  /* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)  /* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))  /* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))  /* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))  /* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))  /* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))  /* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)  /* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)  /* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))  /* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))  /* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))  /* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))  /* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))  /* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))  /* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))  /* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))  /* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)  /* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)  /* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))  /* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
/* ARMv6+ and XScale */
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
#if __ARM_ARCH >= 6
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
#endif

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

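/*
 * Illustrative sketch, not part of the original header: converting the 64-bit
 * physical counter to microseconds using the counter frequency.  It assumes
 * the generic timer is present, that firmware programmed CNTFRQ, and that the
 * frequency is a whole multiple of 1 MHz (typical values such as 24 MHz);
 * the helper name is hypothetical.
 */
static __inline uint64_t
example_cntpct_to_us(void)
{
        uint64_t ticks;
        uint32_t freq;

        ticks = cp15_cntpct_get();      /* 64-bit physical count */
        freq = cp15_cntfrq_get();       /* counter frequency in Hz */
        if (freq < 1000000)             /* sketch only handles MHz-multiple frequencies */
                return (0);
        return (ticks / (freq / 1000000));
}
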
#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1

#if __ARM_ARCH >= 6
/*
 * Cache and TLB maintenance operations for armv6+ code.  The #else block
 * provides armv4/v5 implementations for a few of these used in common code.
 */

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

        dsb();
        _CP15_TLBIALL();
        dsb();
}

/* Flush all not global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

        dsb();
        _CP15_TLBIASID(CPU_ASID_KERNEL);
        dsb();
}

/* Flush single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

        dsb();
        _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
        dsb();
}

/* Flush range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
        KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
            size));

        dsb();
        for (; va < eva; va += PAGE_SIZE)
                _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
        dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined SMP

static __inline void
tlb_flush_all(void)
{

        dsb();
        _CP15_TLBIALLIS();
        dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

        dsb();
        _CP15_TLBIASIDIS(CPU_ASID_KERNEL);
        dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

        dsb();
        _CP15_TLBIMVAAIS(va);
        dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
        KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
            size));

        dsb();
        for (; va < eva; va += PAGE_SIZE)
                _CP15_TLBIMVAAIS(va);
        dsb();
}
#else /* SMP */

#define tlb_flush_all()                 tlb_flush_all_local()
#define tlb_flush_all_ng()              tlb_flush_all_ng_local()
#define tlb_flush(va)                   tlb_flush_local(va)
#define tlb_flush_range(va, size)       tlb_flush_range_local(va, size)

#endif /* SMP */

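/*
 * Illustrative sketch, not part of the original header: a caller that has
 * just removed a page-aligned range of kernel mappings would typically
 * invalidate the matching TLB entries with tlb_flush_range(), which
 * broadcasts on SMP ARMv7 and falls back to the local variant otherwise.
 * The function name is hypothetical and the PTE-clearing step is only
 * indicated by a comment.
 */
static __inline void
example_kremove_flush(vm_offset_t va, vm_size_t size)
{

        /* ... page table entries for [va, va + size) cleared by the caller ... */
        tlb_flush_range(va, size);
}
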
/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
                _CP15_DCCMVAU(va);
#else
                _CP15_DCCMVAC(va);
#endif
        }
        dsb();
#if __ARM_ARCH >= 7 && defined SMP
        _CP15_ICIALLUIS();
#else
        _CP15_ICIALLU();
#endif
        dsb();
        isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
        _CP15_ICIALLUIS();
#else
        _CP15_ICIALLU();
#endif
        dsb();
        isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
        _CP15_BPIALLIS();
#else
        _CP15_BPIALL();
#endif
        dsb();
        isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
                _CP15_DCCMVAU(va);
#else
                _CP15_DCCMVAC(va);
#endif
        }
        dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction. In given range, if there was no dirty cacheline
 * in any cache before, no stale cacheline should remain in them after this
 * operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        /* invalidate L2 first */
        cpu_l2cache_inv_range(pa, size);

        /* then L1 */
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating.  This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new data
 * into memory.  The concern in this case is that dirty lines are not evicted
 * to main memory, overwriting the DMA data.  For that reason, the L1 is done
 * first to ensure that an evicted L1 line doesn't flow to L2 after the L2 has
 * been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        /* invalidate L1 first */
        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();

        /* then L2 */
        cpu_l2cache_inv_range(pa, size);
}

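/*
 * Illustrative sketch, not part of the original header: the usual maintenance
 * pattern around a device-to-memory DMA transfer using the helpers above.
 * The function names are hypothetical; mapping the buffer and starting the
 * transfer are left to the caller.  (For memory-to-device transfers the
 * buffer would instead be written back with dcache_wb_poc(), defined below.)
 */
static __inline void
example_dma_read_prepare(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        /* Discard stale lines (L1 first, then L2) before the device writes memory. */
        dcache_inv_poc_dma(va, pa, size);
}

static __inline void
example_dma_read_complete(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        /* Invalidate again (L2 first, then L1) before the CPU reads the new data. */
        dcache_inv_poc(va, pa, size);
}
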
/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction. In given range, no dirty cacheline should remain
 * in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAC(va);
        }
        dsb();

        cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t va;
        vm_offset_t eva = sva + size;

        dsb();
        /* write back L1 first */
        va = sva & ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAC(va);
        }
        dsb();

        /* then write back and invalidate L2 */
        cpu_l2cache_wbinv_range(pa, size);

        /* then invalidate L1 */
        va = sva & ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
        dsb();
        _CP15_TTB_SET(reg);
        dsb();
        _CP15_BPIALL();
        dsb();
        isb();
        tlb_flush_all_ng_local();
}

#else /* ! __ARM_ARCH >= 6 */

/*
 * armv4/5 compatibility shims.
 *
 * These functions provide armv4 cache maintenance using the new armv6 names.
 * Included here are just the functions actually used now in common code; it
 * may be necessary to add things here over time.
 *
 * The callers of the dcache functions expect these routines to handle address
 * and size values which are not aligned to cacheline boundaries; the armv4
 * and armv5 asm code handles that.
 */

static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        cpu_dcache_inv_range(va, size);
        cpu_l2cache_inv_range(va, size);
}

static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        /* See armv6 code, above, for why we do L2 before L1 in this case. */
        cpu_l2cache_inv_range(va, size);
        cpu_dcache_inv_range(va, size);
}

static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

        cpu_dcache_wb_range(va, size);
        cpu_l2cache_wb_range(va, size);
}

#endif /* __ARM_ARCH >= 6 */

#endif /* _KERNEL */

#endif /* !MACHINE_CPU_V6_H */