cpu-v6.h revision 282984
1/*- 2 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com> 3 * Copyright 2014 Michal Meloun <meloun@miracle.cz> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * $FreeBSD: head/sys/arm/include/cpu-v6.h 282984 2015-05-15 18:10:00Z ian $
 */
#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user serviceable parts here, they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#else

#include "machine/atomic.h"
#include "machine/cpufunc.h"
#include "machine/cpuinfo.h"
#include "machine/sysreg.h"

/* ASID used for the kernel's own TLB maintenance operations below. */
#define CPU_ASID_KERNEL 0

/* Range-checked PoU cache maintenance; implementations live elsewhere. */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 *
 * "aname" is a coprocessor operand list (normally one of the CP15_* macros
 * from machine/sysreg.h, with %0 standing in for the Rt register); _FX
 * stringizes it for splicing into the inline-asm template.
 */
#define _FX(s...) #s

/* Generate a reader of a 32-bit CP15 register (MRC). */
#define _RF0(fname, aname...)						\
static __inline register_t						\
fname(void)								\
{									\
	register_t reg;							\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));		\
	return(reg);							\
}

/* Generate a reader of a 64-bit CP15 register pair (MRRC). */
#define _R64F0(fname, aname)						\
static __inline uint64_t						\
fname(void)								\
{									\
	uint64_t reg;							\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));		\
	return(reg);							\
}

/* Generate a write-only CP15 operation taking no argument (MCR). */
#define _WF0(fname, aname...)						\
static __inline void							\
fname(void)								\
{									\
	__asm __volatile("mcr\t" _FX(aname));				\
}

/* Generate a writer of a 32-bit value to a CP15 register (MCR). */
#define _WF1(fname, aname...)						\
static __inline void							\
fname(register_t reg)							\
{									\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));		\
}

/* Generate a writer of a 64-bit value to a CP15 register pair (MCRR). */
#define _W64F1(fname, aname...)						\
static __inline void							\
fname(uint64_t reg)							\
{									\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));		\
}

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))		/* Write TTBR0 (translation table base) */

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU (v7+ only) */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate */
135 136/* 137 * Publicly accessible functions 138 */ 139 140/* Various control registers */ 141 142_RF0(cp15_dfsr_get, CP15_DFSR(%0)) 143_RF0(cp15_ifsr_get, CP15_IFSR(%0)) 144_WF1(cp15_prrr_set, CP15_PRRR(%0)) 145_WF1(cp15_nmrr_set, CP15_NMRR(%0)) 146_RF0(cp15_ttbr_get, CP15_TTBR0(%0)) 147_RF0(cp15_dfar_get, CP15_DFAR(%0)) 148#if __ARM_ARCH >= 7 149_RF0(cp15_ifar_get, CP15_IFAR(%0)) 150_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0)) 151#endif 152#if __ARM_ARCH >= 6 153_RF0(cp15_actlr_get, CP15_ACTLR(%0)) 154_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0)); 155_RF0(cp15_par_get, CP15_PAR); 156_RF0(cp15_sctlr_get, CP15_SCTLR(%0)) 157#endif 158 159/*CPU id registers */ 160_RF0(cp15_midr_get, CP15_MIDR(%0)) 161_RF0(cp15_ctr_get, CP15_CTR(%0)) 162_RF0(cp15_tcmtr_get, CP15_TCMTR(%0)) 163_RF0(cp15_tlbtr_get, CP15_TLBTR(%0)) 164_RF0(cp15_mpidr_get, CP15_MPIDR(%0)) 165_RF0(cp15_revidr_get, CP15_REVIDR(%0)) 166_RF0(cp15_aidr_get, CP15_AIDR(%0)) 167_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0)) 168_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0)) 169_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0)) 170_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0)) 171_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0)) 172_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0)) 173_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0)) 174_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0)) 175_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0)) 176_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0)) 177_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0)) 178_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0)) 179_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0)) 180_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0)) 181_RF0(cp15_cbar_get, CP15_CBAR(%0)) 182 183/* Performance Monitor registers */ 184 185#if __ARM_ARCH == 6 && defined(CPU_ARM1176) 186_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0)) 187_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0)) 188#elif __ARM_ARCH > 6 189_RF0(cp15_pmcr_get, CP15_PMCR(%0)) 190_WF1(cp15_pmcr_set, CP15_PMCR(%0)) 191_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0)) 192_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0)) 
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))	/* cycle counter */
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif

/* Software thread-ID registers */
_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))	/* user read/write */
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))	/* user read-only */
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
/*
 * NOTE(review): "tpidrpwr" looks like a transposition of the register name
 * TPIDRPRW; the spelling is kept because callers depend on it.
 */
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))		/* counter frequency */
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))	/* physical timer value */
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))	/* physical timer control */
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))	/* virtual timer value */
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))	/* virtual timer control */
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

239_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0)) 240_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0)) 241_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0)) 242_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0)) 243_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0)) 244_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0)) 245_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0)) 246_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0)) 247_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0)) 248_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0)) 249 250#undef _FX 251#undef _RF0 252#undef _WF0 253#undef _WF1 254 255/* 256 * TLB maintenance operations. 257 */ 258 259/* Local (i.e. not broadcasting ) operations. */ 260 261/* Flush all TLB entries (even global). */ 262static __inline void 263tlb_flush_all_local(void) 264{ 265 266 dsb(); 267 _CP15_TLBIALL(); 268 dsb(); 269} 270 271/* Flush all not global TLB entries. */ 272static __inline void 273tlb_flush_all_ng_local(void) 274{ 275 276 dsb(); 277 _CP15_TLBIASID(CPU_ASID_KERNEL); 278 dsb(); 279} 280 281/* Flush single TLB entry (even global). */ 282static __inline void 283tlb_flush_local(vm_offset_t va) 284{ 285 286 KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); 287 288 dsb(); 289 _CP15_TLBIMVA(va | CPU_ASID_KERNEL); 290 dsb(); 291} 292 293/* Flush range of TLB entries (even global). */ 294static __inline void 295tlb_flush_range_local(vm_offset_t va, vm_size_t size) 296{ 297 vm_offset_t eva = va + size; 298 299 KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va)); 300 KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__, 301 size)); 302 303 dsb(); 304 for (; va < eva; va += PAGE_SIZE) 305 _CP15_TLBIMVA(va | CPU_ASID_KERNEL); 306 dsb(); 307} 308 309/* Broadcasting operations. 
 */
#if __ARM_ARCH >= 7 && defined SMP

/* Flush all TLB entries (even global) in the inner shareable domain. */
static __inline void
tlb_flush_all(void)
{

	dsb();
	_CP15_TLBIALLIS();
	dsb();
}

/* Flush all not-global TLB entries (kernel ASID), broadcast. */
static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
	dsb();
}

/* Flush single TLB entry by MVA (all ASIDs), broadcast; va page aligned. */
static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVAAIS(va);
	dsb();
}

/* Flush range of TLB entries, broadcast; va and size page aligned. */
static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVAAIS(va);
	dsb();
}
#else /* !SMP || __ARM_ARCH < 7: broadcast variants degrade to local ops */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* SMP */

/*
 * Cache maintenance operations.
366 */ 367 368/* Sync I and D caches to PoU */ 369static __inline void 370icache_sync(vm_offset_t va, vm_size_t size) 371{ 372 vm_offset_t eva = va + size; 373 374 dsb(); 375 va &= ~cpuinfo.dcache_line_mask; 376 for ( ; va < eva; va += cpuinfo.dcache_line_size) { 377#if __ARM_ARCH >= 7 && defined SMP 378 _CP15_DCCMVAU(va); 379#else 380 _CP15_DCCMVAC(va); 381#endif 382 } 383 dsb(); 384#if __ARM_ARCH >= 7 && defined SMP 385 _CP15_ICIALLUIS(); 386#else 387 _CP15_ICIALLU(); 388#endif 389 dsb(); 390 isb(); 391} 392 393/* Invalidate I cache */ 394static __inline void 395icache_inv_all(void) 396{ 397#if __ARM_ARCH >= 7 && defined SMP 398 _CP15_ICIALLUIS(); 399#else 400 _CP15_ICIALLU(); 401#endif 402 dsb(); 403 isb(); 404} 405 406/* Invalidate branch predictor buffer */ 407static __inline void 408bpb_inv_all(void) 409{ 410#if __ARM_ARCH >= 7 && defined SMP 411 _CP15_BPIALLIS(); 412#else 413 _CP15_BPIALL(); 414#endif 415 dsb(); 416 isb(); 417} 418 419/* Write back D-cache to PoU */ 420static __inline void 421dcache_wb_pou(vm_offset_t va, vm_size_t size) 422{ 423 vm_offset_t eva = va + size; 424 425 dsb(); 426 va &= ~cpuinfo.dcache_line_mask; 427 for ( ; va < eva; va += cpuinfo.dcache_line_size) { 428#if __ARM_ARCH >= 7 && defined SMP 429 _CP15_DCCMVAU(va); 430#else 431 _CP15_DCCMVAC(va); 432#endif 433 } 434 dsb(); 435} 436 437/* 438 * Invalidate D-cache to PoC 439 * 440 * Caches are invalidated from outermost to innermost as fresh cachelines 441 * flow in this direction. In given range, if there was no dirty cacheline 442 * in any cache before, no stale cacheline should remain in them after this 443 * operation finishes. 
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;	/* round down to a cache line */
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction. In given range, no dirty cacheline should remain
 * in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* clean L1 first */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back L2 */
	cpu_l2cache_wb_range(pa, size);
}

/*
 * Write back and invalidate D-cache to PoC: clean inner-to-outer, then
 * invalidate outer-to-inner, so no dirty or stale line survives.
 */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Set TTB0 register (translation table base), then invalidate the branch
 * predictor and the non-global TLB entries that may be stale under the
 * new tables.
 */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

#endif /* _KERNEL */

#endif /* !MACHINE_CPU_V6_H */