#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#include <linux/compiler.h>
#include <asm/barriers.h>

#ifdef CONFIG_ARM64

/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M		(1 << 0)	/* MMU enable */
#define CR_A		(1 << 1)	/* Alignment abort enable */
#define CR_C		(1 << 2)	/* Dcache enable */
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable */
#define CR_I		(1 << 12)	/* Icache enable */
#define CR_WXN		(1 << 19)	/* Write Permission implies XN */
#define CR_EE		(1 << 25)	/* Exception (Big) Endian */

#define ES_TO_AARCH64		1
#define ES_TO_AARCH32		0

/*
 * SCR_EL3 bits definitions
 */
#define SCR_EL3_RW_AARCH64	(1 << 10) /* Next lower level is AArch64 */
#define SCR_EL3_RW_AARCH32	(0 << 10) /* Lower levels are AArch32 */
#define SCR_EL3_HCE_EN		(1 << 8)  /* Hypervisor Call enable */
#define SCR_EL3_SMD_DIS		(1 << 7)  /* Secure Monitor Call disable */
#define SCR_EL3_RES1		(3 << 4)  /* Reserved, RES1 */
#define SCR_EL3_EA_EN		(1 << 3)  /* External aborts taken to EL3 */
#define SCR_EL3_NS_EN		(1 << 0)  /* EL0 and EL1 in Non-secure state */

/*
 * SPSR_EL3/SPSR_EL2 bits definitions
 */
#define SPSR_EL_END_LE		(0 << 9)  /* Exception Little-endian */
#define SPSR_EL_DEBUG_MASK	(1 << 9)  /* Debug exception masked */
#define SPSR_EL_ASYN_MASK	(1 << 8)  /* Asynchronous data abort masked */
#define SPSR_EL_SERR_MASK	(1 << 8)  /* System Error exception masked */
#define SPSR_EL_IRQ_MASK	(1 << 7)  /* IRQ exception masked */
#define SPSR_EL_FIQ_MASK	(1 << 6)  /* FIQ exception masked */
#define SPSR_EL_T_A32		(0 << 5)  /* AArch32 instruction set A32 */
#define SPSR_EL_M_AARCH64	(0 << 4)  /* Exception taken from AArch64 */
#define SPSR_EL_M_AARCH32	(1 << 4)  /* Exception taken from AArch32 */
#define SPSR_EL_M_SVC		(0x3)	  /* Exception taken from SVC mode */
#define SPSR_EL_M_HYP		(0xa)	  /* Exception taken from HYP mode */
#define SPSR_EL_M_EL1H		(5)	  /* Exception taken from EL1h mode */
#define SPSR_EL_M_EL2H		(9)	  /* Exception taken from EL2h mode */

/*
 * CPTR_EL2 bits definitions
 */
#define CPTR_EL2_RES1		(3 << 12 | 0x3ff)	/* Reserved, RES1 */

/*
 * SCTLR_EL2 bits definitions
 */
#define SCTLR_EL2_RES1		(3 << 28 | 3 << 22 | 1 << 18 | 1 << 16 |\
				 1 << 11 | 3 << 4)  /* Reserved, RES1 */
#define SCTLR_EL2_EE_LE		(0 << 25) /* Exception Little-endian */
#define SCTLR_EL2_WXN_DIS	(0 << 19) /* Write permission is not XN */
#define SCTLR_EL2_ICACHE_DIS	(0 << 12) /* Instruction cache disabled */
#define SCTLR_EL2_SA_DIS	(0 << 3)  /* Stack Alignment Check disabled */
#define SCTLR_EL2_DCACHE_DIS	(0 << 2)  /* Data cache disabled */
#define SCTLR_EL2_ALIGN_DIS	(0 << 1)  /* Alignment check disabled */
#define SCTLR_EL2_MMU_DIS	(0)	  /* MMU disabled */

/*
 * CNTHCTL_EL2 bits definitions
 */
#define CNTHCTL_EL2_EL1PCEN_EN	(1 << 1)  /* Physical timer regs accessible */
#define CNTHCTL_EL2_EL1PCTEN_EN	(1 << 0)  /* Physical counter accessible */

/*
 * HCR_EL2 bits definitions
 */
#define HCR_EL2_API		(1ULL << 41) /* Trap pointer authentication
						instructions */
#define HCR_EL2_APK		(1ULL << 40) /* Trap pointer authentication
						key access */
#define HCR_EL2_RW_AARCH64	(1 << 31) /* EL1 is AArch64 */
#define HCR_EL2_RW_AARCH32	(0 << 31) /* Lower levels are AArch32 */
#define HCR_EL2_HCD_DIS		(1 << 29) /* Hypervisor Call disabled */
#define HCR_EL2_AMO_EL2		(1 << 5)  /* Route SErrors to EL2 */

#define ID_AA64ISAR0_EL1_RNDR	(0xFUL << 60) /* RNDR random registers */
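#ifndef __ASSEMBLY__
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * this interface): how the SCR_EL3/SPSR_EL3 definitions above are typically
 * combined before an ERET that drops from EL3 into a 64-bit EL2 with
 * debug/SError/IRQ/FIQ masked.
 */
static inline unsigned long example_el3_to_el2_regs(unsigned long *spsr)
{
	/* Mask D/A/I/F, stay in AArch64 state, use the EL2h stack pointer */
	*spsr = SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK | SPSR_EL_IRQ_MASK |
		SPSR_EL_FIQ_MASK | SPSR_EL_M_AARCH64 | SPSR_EL_M_EL2H;

	/* Next EL is AArch64 and non-secure; allow HVC, disable SMC */
	return SCR_EL3_RW_AARCH64 | SCR_EL3_HCE_EN | SCR_EL3_SMD_DIS |
	       SCR_EL3_RES1 | SCR_EL3_NS_EN;
}
#endif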
/*
 * ID_AA64ISAR1_EL1 bits definitions
 */
#define ID_AA64ISAR1_EL1_GPI	(0xF << 28) /* Implementation-defined generic
					       code auth algorithm */
#define ID_AA64ISAR1_EL1_GPA	(0xF << 24) /* QARMA generic code auth
					       algorithm */
#define ID_AA64ISAR1_EL1_API	(0xF << 8)  /* Implementation-defined address
					       auth algorithm */
#define ID_AA64ISAR1_EL1_APA	(0xF << 4)  /* QARMA address auth algorithm */

/*
 * ID_AA64PFR0_EL1 bits definitions
 */
#define ID_AA64PFR0_EL1_EL3	(0xF << 12) /* EL3 implemented */
#define ID_AA64PFR0_EL1_EL2	(0xF << 8)  /* EL2 implemented */

/*
 * CPACR_EL1 bits definitions
 */
#define CPACR_EL1_FPEN_EN	(3 << 20)   /* SIMD and FP instruction enabled */

/*
 * SCTLR_EL1 bits definitions
 */
#define SCTLR_EL1_RES1		(3 << 28 | 3 << 22 | 1 << 20 |\
				 1 << 11) /* Reserved, RES1 */
#define SCTLR_EL1_UCI_DIS	(0 << 26) /* Cache instruction disabled */
#define SCTLR_EL1_EE_LE		(0 << 25) /* Exception Little-endian */
#define SCTLR_EL1_WXN_DIS	(0 << 19) /* Write permission is not XN */
#define SCTLR_EL1_NTWE_DIS	(0 << 18) /* WFE instruction disabled */
#define SCTLR_EL1_NTWI_DIS	(0 << 16) /* WFI instruction disabled */
#define SCTLR_EL1_UCT_DIS	(0 << 15) /* CTR_EL0 access disabled */
#define SCTLR_EL1_DZE_DIS	(0 << 14) /* DC ZVA instruction disabled */
#define SCTLR_EL1_ICACHE_DIS	(0 << 12) /* Instruction cache disabled */
#define SCTLR_EL1_UMA_DIS	(0 << 9)  /* User Mask Access disabled */
#define SCTLR_EL1_SED_EN	(0 << 8)  /* SETEND instruction enabled */
#define SCTLR_EL1_ITD_EN	(0 << 7)  /* IT instruction enabled */
#define SCTLR_EL1_CP15BEN_DIS	(0 << 5)  /* CP15 barrier operation disabled */
#define SCTLR_EL1_SA0_DIS	(0 << 4)  /* Stack Alignment EL0 disabled */
#define SCTLR_EL1_SA_DIS	(0 << 3)  /* Stack Alignment EL1 disabled */
#define SCTLR_EL1_DCACHE_DIS	(0 << 2)  /* Data cache disabled */
#define SCTLR_EL1_ALIGN_DIS	(0 << 1)  /* Alignment check disabled */
#define SCTLR_EL1_MMU_DIS	(0)	  /* MMU disabled */

#ifndef __ASSEMBLY__

struct pt_regs;

u64 get_page_table_size(void);
#define PGTABLE_SIZE	get_page_table_size()

/* 2MB granularity */
#define MMU_SECTION_SHIFT	21
#define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)

/* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */
enum dcache_option {
	DCACHE_OFF = 0 << 2,
	DCACHE_WRITETHROUGH = 3 << 2,
	DCACHE_WRITEBACK = 4 << 2,
	DCACHE_WRITEALLOC = 4 << 2,
};

#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})

static inline unsigned int current_el(void)
{
	unsigned long el;

	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return 3 & (el >> 2);
}

static inline unsigned int get_sctlr(void)
{
	unsigned int el;
	unsigned long val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}

static inline void set_sctlr(unsigned long val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}
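/*
 * Illustrative sketch only (hypothetical helper): a read-modify-write of
 * SCTLR at the current exception level using the accessors above.  CR_I is
 * bit 12 of SCTLR_ELx at every exception level, so the same code works at
 * EL1, EL2 and EL3.
 */
static inline void example_enable_icache(void)
{
	set_sctlr(get_sctlr() | CR_I);
}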
static inline unsigned long read_mpidr(void)
{
	unsigned long val;

	asm volatile("mrs %0, mpidr_el1" : "=r" (val));

	return val;
}

#define BSP_COREID	0

void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);

/**
 * __asm_invalidate_dcache_range() - Invalidate a range of virtual addresses
 *
 * This performs an invalidate from @start to @end - 1. Both addresses
 * should be cache-aligned, otherwise this function will align the start
 * address and may continue past the end address.
 *
 * Data in the address range is evicted from the cache and is not written back
 * to memory.
 *
 * @start: Start address to invalidate
 * @end: End address to invalidate up to (exclusive)
 */
void __asm_invalidate_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_invalidate_l3_dcache(void);
int __asm_flush_l3_dcache(void);
int __asm_invalidate_l3_icache(void);
void __asm_switch_ttbr(u64 new_ttbr);

/*
 * armv8_switch_to_el2() - switch from EL3 to EL2 for ARMv8
 *
 * @args:        For loading 64-bit OS, fdt address.
 *               For loading 32-bit OS, zero.
 * @mach_nr:     For loading 64-bit OS, zero.
 *               For loading 32-bit OS, machine nr.
 * @fdt_addr:    For loading 64-bit OS, zero.
 *               For loading 32-bit OS, fdt address.
 * @arg4:        Input argument.
 * @entry_point: kernel entry point
 * @es_flag:     execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
 */
void __noreturn armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
				    u64 arg4, u64 entry_point, u64 es_flag);
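/*
 * Illustrative sketch only (the helper name and addresses are hypothetical):
 * the typical 64-bit hand-off, where @args carries the FDT address and the
 * mach_nr/fdt_addr slots stay zero, as described above.
 */
static inline void __noreturn example_boot_aarch64(u64 fdt_addr,
						   u64 kernel_entry)
{
	armv8_switch_to_el2(fdt_addr, 0, 0, 0, kernel_entry, ES_TO_AARCH64);
}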
/*
 * armv8_switch_to_el1() - switch from EL2 to EL1 for ARMv8
 *
 * @args:        For loading 64-bit OS, fdt address.
 *               For loading 32-bit OS, zero.
 * @mach_nr:     For loading 64-bit OS, zero.
 *               For loading 32-bit OS, machine nr.
 * @fdt_addr:    For loading 64-bit OS, zero.
 *               For loading 32-bit OS, fdt address.
 * @arg4:        Input argument.
 * @entry_point: kernel entry point
 * @es_flag:     execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
 */
void armv8_switch_to_el1(u64 args, u64 mach_nr, u64 fdt_addr,
			 u64 arg4, u64 entry_point, u64 es_flag);
void armv8_el2_to_aarch32(u64 args, u64 mach_nr, u64 fdt_addr,
			  u64 arg4, u64 entry_point);
void gic_init(void);
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);

void flush_l3_cache(void);
void mmu_change_region_attr(phys_addr_t start, size_t size, u64 attrs);

/*
 * smc_call() - issue a secure monitor call
 *
 * Issue a secure monitor call in accordance with ARM "SMC Calling
 * convention", DEN0028A
 *
 * @args: input and output arguments
 */
void smc_call(struct pt_regs *args);

void __noreturn psci_system_reset(void);
void __noreturn psci_system_reset2(u32 reset_level, u32 cookie);
void __noreturn psci_system_off(void);

#ifdef CONFIG_ARMV8_PSCI
extern char __secure_start[];
extern char __secure_end[];
extern char __secure_stack_start[];
extern char __secure_stack_end[];

void armv8_setup_psci(void);
void psci_setup_vectors(void);
void psci_arch_init(void);
#endif

#endif /* __ASSEMBLY__ */

#else /* CONFIG_ARM64 */

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
#define CR_TRE	(1 << 28)	/* TEX remap enable */
#define CR_AFE	(1 << 29)	/* Access flag enable */
#define CR_TE	(1 << 30)	/* Thumb exception enable */

#if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE	(4096 * 5)
#elif !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE	(4096 * 4)
#endif
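#ifndef __ASSEMBLY__
/*
 * Illustrative sketch only (hypothetical helper): the CR_* bits above
 * describe CP15 c1, the system control register.  A minimal
 * read-modify-write enabling the D-cache could look like this; the
 * get_cr()/set_cr() accessors defined below wrap the same accesses.
 */
static inline void example_enable_dcache_cp15(void)
{
	unsigned int cr;

	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (cr));
	cr |= CR_C;		/* Dcache enable */
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (cr) : "cc");
	isb();
}
#endif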
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#ifdef CONFIG_ARMV7_LPAE
void switch_to_hypervisor_ret(void);
#endif

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif

static inline unsigned long get_cpsr(void)
{
	unsigned long cpsr;

	asm volatile("mrs %0, cpsr" : "=r"(cpsr): );
	return cpsr;
}

static inline int is_hyp(void)
{
#ifdef CONFIG_ARMV7_LPAE
	/* HYP mode requires LPAE ... */
	return ((get_cpsr() & 0x1f) == 0x1a);
#else
	/* ... so without LPAE support we can optimize all hyp code away */
	return 0;
#endif
}

static inline unsigned int get_cr(void)
{
	unsigned int val;

	if (is_hyp())
		asm volatile("mrc p15, 4, %0, c1, c0, 0	@ get CR" : "=r" (val)
			     :
			     : "cc");
	else
		asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val)
			     :
			     : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	if (is_hyp())
		asm volatile("mcr p15, 4, %0, c1, c0, 0	@ set CR" :
			     : "r" (val)
			     : "cc");
	else
		asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" :
			     : "r" (val)
			     : "cc");
	isb();
}

#ifdef CONFIG_ARMV7_LPAE
/* Long-Descriptor Translation Table Level 1/2 Bits */
#define TTB_SECT_XN_MASK	(1ULL << 54)
#define TTB_SECT_NG_MASK	(1 << 11)
#define TTB_SECT_AF		(1 << 10)
#define TTB_SECT_SH_MASK	(3 << 8)
#define TTB_SECT_NS_MASK	(1 << 5)
#define TTB_SECT_AP		(1 << 6)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_MAIR(x)	((x & 0x7) << 2)	/* Index into MAIR */
#define TTB_SECT		(1 << 0)
#define TTB_PAGETABLE		(3 << 0)

/* TTBCR flags */
#define TTBCR_EAE		(1 << 31)
#define TTBCR_T0SZ(x)		((x) << 0)
#define TTBCR_T1SZ(x)		((x) << 16)
#define TTBCR_USING_TTBR0	(TTBCR_T0SZ(0) | TTBCR_T1SZ(0))
#define TTBCR_IRGN0_NC		(0 << 8)
#define TTBCR_IRGN0_WBWA	(1 << 8)
#define TTBCR_IRGN0_WT		(2 << 8)
#define TTBCR_IRGN0_WBNWA	(3 << 8)
#define TTBCR_IRGN0_MASK	(3 << 8)
#define TTBCR_ORGN0_NC		(0 << 10)
#define TTBCR_ORGN0_WBWA	(1 << 10)
#define TTBCR_ORGN0_WT		(2 << 10)
#define TTBCR_ORGN0_WBNWA	(3 << 10)
#define TTBCR_ORGN0_MASK	(3 << 10)
#define TTBCR_SHARED_NON	(0 << 12)
#define TTBCR_SHARED_OUTER	(2 << 12)
#define TTBCR_SHARED_INNER	(3 << 12)
#define TTBCR_EPD0		(0 << 7)

/*
 * VMSAv8-32 Long-descriptor format memory region attributes
 * (ARM Architecture Reference Manual section G5.7.4 [DDI0487E.a])
 *
 * MAIR0[ 7: 0] 0x00 Device-nGnRnE (aka Strongly-Ordered)
 * MAIR0[15: 8] 0xaa Outer/Inner Write-Through, Read-Allocate No Write-Allocate
 * MAIR0[23:16] 0xee Outer/Inner Write-Back, Read-Allocate No Write-Allocate
 * MAIR0[31:24] 0xff Outer/Inner Write-Back, Read-Allocate Write-Allocate
 */
#define MEMORY_ATTRIBUTES	((0x00 << (0 * 8)) | (0xaa << (1 * 8)) | \
				 (0xee << (2 * 8)) | (0xff << (3 * 8)))
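/*
 * Illustrative sketch only (hypothetical helper): MEMORY_ATTRIBUTES is the
 * value MAIR0 would be programmed with so that TTB_SECT_MAIR(n) below
 * selects attribute byte n of the table in the comment above.
 */
static inline void example_program_mair0(void)
{
	/* MAIR0 is CP15 c10, c2, 0 in the LPAE register scheme */
	asm volatile("mcr p15, 0, %0, c10, c2, 0"
		     : : "r" (MEMORY_ATTRIBUTES));
	isb();
}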
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT | TTB_SECT_MAIR(0) | TTB_SECT_XN_MASK,
	DCACHE_WRITETHROUGH = TTB_SECT | TTB_SECT_MAIR(1),
	DCACHE_WRITEBACK = TTB_SECT | TTB_SECT_MAIR(2),
	DCACHE_WRITEALLOC = TTB_SECT | TTB_SECT_MAIR(3),
};
#elif defined(CONFIG_CPU_V7A)
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK	(1 << 19)
#define TTB_SECT_NG_MASK	(1 << 17)
#define TTB_SECT_S_MASK		(1 << 16)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_AP		(3 << 10)
#define TTB_SECT_TEX(x)		((x & 0x7) << 12)
#define TTB_SECT_DOMAIN(x)	((x & 0xf) << 5)
#define TTB_SECT_XN_MASK	(1 << 4)
#define TTB_SECT_C_MASK		(1 << 3)
#define TTB_SECT_B_MASK		(1 << 2)
#define TTB_SECT		(2 << 0)

/*
 * Short-descriptor format memory region attributes, without TEX remap
 * (ARM Architecture Reference Manual section G5.7.2 [DDI0487E.a])
 *
 * TEX[0]  C  B
 *   0     0  0	 Device-nGnRnE (aka Strongly-Ordered)
 *   0     1  0	 Outer/Inner Write-Through, Read-Allocate No Write-Allocate
 *   0     1  1	 Outer/Inner Write-Back, Read-Allocate No Write-Allocate
 *   1     1  1	 Outer/Inner Write-Back, Read-Allocate Write-Allocate
 */
enum dcache_option {
	DCACHE_OFF = TTB_SECT_DOMAIN(0) | TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = TTB_SECT_DOMAIN(0) | TTB_SECT | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
#else
#define TTB_SECT_AP		(3 << 10)
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
#endif

/* Size of an MMU section */
enum {
#ifdef CONFIG_ARMV7_LPAE
	MMU_SECTION_SHIFT	= 21, /* 2MB */
#else
	MMU_SECTION_SHIFT	= 20, /* 1MB */
#endif
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};

#ifdef CONFIG_CPU_V7A
/* TTBR0 bits */
#define TTBR0_BASE_ADDR_MASK	0xFFFFC000
#define TTBR0_RGN_NC		(0 << 3)
#define TTBR0_RGN_WBWA		(1 << 3)
#define TTBR0_RGN_WT		(2 << 3)
#define TTBR0_RGN_WB		(3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
#define TTBR0_IRGN_NC		(0 << 0 | 0 << 6)
#define TTBR0_IRGN_WBWA		(0 << 0 | 1 << 6)
#define TTBR0_IRGN_WT		(1 << 0 | 0 << 6)
#define TTBR0_IRGN_WB		(1 << 0 | 1 << 6)
#endif

/**
 * mmu_page_table_flush() - register an update to page tables
 *
 * Register an update to the page tables, and flush the TLB
 *
 * @start:	start address of update in page table
 * @stop:	stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#ifdef CONFIG_ARMV7_PSCI
void psci_arch_cpu_entry(void);
void psci_arch_init(void);
u32 psci_version(void);
s32 psci_features(u32 function_id, u32 psci_fid);
s32 psci_cpu_off(void);
s32 psci_cpu_on(u32 function_id, u32 target_cpu, u32 pc,
		u32 context_id);
s32 psci_affinity_info(u32 function_id, u32 target_affinity,
		       u32 lowest_affinity_level);
u32 psci_migrate_info_type(void);
void psci_system_off(void);
void psci_system_reset(void);
#endif

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* CONFIG_ARM64 */

#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
#define DCACHE_DEFAULT_OPTION	DCACHE_WRITETHROUGH
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
#define DCACHE_DEFAULT_OPTION	DCACHE_WRITEALLOC
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEBACK)
#define DCACHE_DEFAULT_OPTION	DCACHE_WRITEBACK
#endif

#ifndef __ASSEMBLY__
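/*
 * Illustrative sketch only: applying the platform default cache policy to a
 * memory window with mmu_set_region_dcache_behaviour(), which is declared
 * further down in this header (the prototype is repeated here so the sketch
 * stands alone).  The helper name, address and size are hypothetical
 * placeholders.
 */
#ifdef DCACHE_DEFAULT_OPTION
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);

static inline void example_map_dram_default(void)
{
	mmu_set_region_dcache_behaviour(0x80000000, 0x01000000,
					DCACHE_DEFAULT_OPTION);
}
#endif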
/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function it will be called immediately when U-Boot
 * starts, both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */

/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function. U-Boot
 * will return to the (uninitialised) value of lr, and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);

/**
 * mmu_set_region_dcache_behaviour_phys() - set virt/phys mapping
 *
 * Change the virt/phys mapping and cache settings for a region.
 *
 * @virt:	virtual start address of memory region to change
 * @phys:	physical address for the memory region to set
 * @size:	size of memory region to change
 * @option:	dcache option to select
 */
void mmu_set_region_dcache_behaviour_phys(phys_addr_t virt, phys_addr_t phys,
					  size_t size,
					  enum dcache_option option);

/**
 * mmu_set_region_dcache_behaviour() - set cache settings
 *
 * Change the cache settings for a region.
 *
 * @start:	start address of memory region to change
 * @size:	size of memory region to change
 * @option:	dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);

#ifdef CONFIG_SYS_NONCACHED_MEMORY
/**
 * noncached_init() - Initialize non-cached memory region
 *
 * Initialize non-cached memory area. This memory region will typically be
 * located right below the malloc() area and mapped uncached in the MMU.
 *
 * It is called during the generic post-relocation init sequence.
 *
 * Return: 0 if OK
 */
int noncached_init(void);

phys_addr_t noncached_alloc(size_t size, size_t align);
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_ARM_SYSTEM_H */