/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/const.h>
#include <linux/sizes.h>
#include <asm/page-def.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and "fallback" to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_RANGE		(_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
#define VMEMMAP_SIZE		((VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page))

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE		(SZ_2G)
#define VMEMMAP_START		(VMEMMAP_END - VMEMMAP_SIZE)
#define VMEMMAP_END		(-UL(SZ_1G))
#define PCI_IO_START		(VMEMMAP_END + SZ_8M)
#define PCI_IO_END		(PCI_IO_START + PCI_IO_SIZE)
#define FIXADDR_TOP		(-UL(SZ_8M))

#if VA_BITS > 48
#ifdef CONFIG_ARM64_16K_PAGES
#define VA_BITS_MIN		(47)
#else
#define VA_BITS_MIN		(48)
#endif
#else
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))

#define KERNEL_START		_text
#define KERNEL_END		_end
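/*
 * Worked example of the layout above (illustrative only, assuming
 * VA_BITS == 48 and KASAN disabled; the exact numbers depend on the
 * configuration):
 *
 *	PAGE_OFFSET   == -(UL(1) << 48)           == 0xffff000000000000
 *	_PAGE_END(48) == -(UL(1) << 47)           == 0xffff800000000000
 *	MODULES_VADDR == _PAGE_END(48)            == 0xffff800000000000
 *	MODULES_END   == MODULES_VADDR + SZ_2G    == 0xffff800080000000
 *	KIMAGE_VADDR  == MODULES_END              == 0xffff800080000000
 *
 * i.e. the linear map occupies the lower half of the TTBR1 address space,
 * with the module area and kernel image starting immediately above it.
 */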
/*
 * Generic and Software Tag-Based KASAN modes require 1/8th and 1/16th of the
 * kernel virtual address space for storing the shadow memory respectively.
 *
 * The mapping between a virtual memory address and its corresponding shadow
 * memory address is defined based on the formula:
 *
 *	shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * where KASAN_SHADOW_SCALE_SHIFT is the order of the number of bits that map
 * to a single shadow byte and KASAN_SHADOW_OFFSET is a constant that offsets
 * the mapping. Note that KASAN_SHADOW_OFFSET does not point to the start of
 * the shadow memory region.
 *
 * Based on this mapping, we define two constants:
 *
 *	KASAN_SHADOW_START: the start of the shadow memory region;
 *	KASAN_SHADOW_END: the end of the shadow memory region.
 *
 * KASAN_SHADOW_END is defined first as the shadow address that corresponds to
 * the upper bound of possible virtual kernel memory addresses UL(1) << 64
 * according to the mapping formula.
 *
 * KASAN_SHADOW_START is defined second based on KASAN_SHADOW_END. The shadow
 * memory start must map to the lowest possible kernel virtual memory address
 * and thus it depends on the actual bitness of the address space.
 *
 * As KASAN inserts redzones between stack variables, this increases the stack
 * memory usage significantly. Thus, we double the (minimum) stack size.
 */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) + KASAN_SHADOW_OFFSET)
#define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (UL(1) << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
#define PAGE_END		KASAN_SHADOW_START
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define PAGE_END		(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly (see the illustrative sketch further below).
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * With the minimum frame size of [x29, x30], exactly half the combined
 * sizes of the hyp and overflow stacks is the maximum size needed to
 * save the unwound stacktrace; plus an additional entry to delimit the
 * end.
 */
#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))

/*
 * Alignment of kernel segments (e.g. .text, .data).
 *
 *  4 KB granule: 16 level 3 entries, with contiguous bit
 * 16 KB granule:  4 level 3 entries, without contiguous bit
 * 64 KB granule:  1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K

/*
 * Memory types available.
 *
 * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
 *	      the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
 *	      that protection_map[] only contains MT_NORMAL attributes.
 */
#define MT_NORMAL		0
#define MT_NORMAL_TAGGED	1
#define MT_NORMAL_NC		2
#define MT_DEVICE_nGnRnE	3
#define MT_DEVICE_nGnRE		4

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_NORMAL_NC		0x5
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
 * Stage-2 enforces Normal-WB and Device-nGnRE
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_NORMAL_NC	5
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif
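/*
 * Illustrative sketch of the VMAP'd stack overflow test mentioned above
 * (not part of the original header; the helper name is hypothetical and
 * the real check lives in the entry assembly). With the stack base
 * aligned to 2 * THREAD_SIZE, a stack pointer that is still inside the
 * stack has bit THREAD_SHIFT clear, so a set bit indicates that SP has
 * run off the end of the stack:
 *
 *	static inline bool sp_has_overflowed(unsigned long sp)
 *	{
 *		return sp & (UL(1) << THREAD_SHIFT);
 *	}
 */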
/*
 * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
 * until link time.
 */
#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE)

/*
 * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
 * until link time.
 */
#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/sections.h>
#include <asm/sysreg.h>

static inline u64 __pure read_tcr(void)
{
	u64 tcr;

	// read_sysreg() uses asm volatile, so avoid it here
	asm("mrs %0, tcr_el1" : "=r"(tcr));
	return tcr;
}

#if VA_BITS > 48
// For reasons of #include hell, we can't use TCR_T1SZ_OFFSET/TCR_T1SZ_MASK here
#define vabits_actual		(64 - ((read_tcr() >> 16) & 63))
#else
#define vabits_actual		((u64)VA_BITS)
#endif

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return (u64)&_text - KIMAGE_VADDR;
}

#ifdef CONFIG_RANDOMIZE_BASE
void kaslr_init(void);
static inline bool kaslr_enabled(void)
{
	extern bool __kaslr_is_enabled;
	return __kaslr_is_enabled;
}
#else
static inline void kaslr_init(void) { }
static inline bool kaslr_enabled(void) { return false; }
#endif

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view. We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)
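/*
 * Worked example (illustrative values only, not from the original header):
 * on a platform whose RAM starts at physical address 0x80000000,
 * PHYS_OFFSET is 0x80000000 and, with 4 KiB pages,
 * PHYS_PFN_OFFSET == 0x80000000 >> 12 == 0x80000.
 */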
/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr &= __untagged_addr(__addr);				\
	(__force __typeof__(addr))__addr;				\
})

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_reset(addr)	__untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag)	0UL
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline const void *__tag_set(const void *addr, u8 tag)
{
	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}

#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tag_checks_sync()		mte_enable_kernel_sync()
#define arch_enable_tag_checks_async()		mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm()		mte_enable_kernel_asymm()
#define arch_suppress_tag_checks_start()	mte_enable_tco()
#define arch_suppress_tag_checks_stop()		mte_disable_tco()
#define arch_force_async_tag_fault()		mte_check_tfsr_exit()
#define arch_get_random_tag()			mte_get_random_tag()
#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag, init)	\
			mte_set_mem_tag_range((addr), (size), (tag), (init))
#endif /* CONFIG_KASAN_HW_TAGS */

/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * Check whether an arbitrary address is within the linear map, which
 * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
 * kernel's TTBR1 address range.
 */
#define __is_lm_address(addr)	(((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr)	(((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
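/*
 * Illustrative example (not part of the original header): the two
 * translations picked by __virt_to_phys_nodebug() above, written out.
 * 'lm_addr' and 'kimg_addr' are hypothetical linear-map and kernel-image
 * addresses respectively.
 *
 *	__is_lm_address(lm_addr)   -> true,  pa == (lm_addr - PAGE_OFFSET) + PHYS_OFFSET
 *	__is_lm_address(kimg_addr) -> false, pa == kimg_addr - kimage_voffset
 */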
/*
 * Note: Drivers should NOT use these. They are the wrong
 * translation to use for DMA addresses. Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/* Needed already here for resolving __phys_to_pfn() in virt_to_pfn() */
#include <asm-generic/memory_model.h>

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __phys_to_pfn(virt_to_phys(kaddr));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 * virt_to_page(x)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#if defined(CONFIG_DEBUG_VIRTUAL)
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	void *__addr = __va(page_to_phys(__page));			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})

#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
#endif /* CONFIG_DEBUG_VIRTUAL */

#define virt_addr_valid(addr)	({					\
	__typeof__(addr) __addr = __tag_reset(addr);			\
	__is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr));	\
})

void dump_mem_limit(void);
#endif /* !ASSEMBLY */

/*
 * Given that the GIC architecture permits ITS implementations that can only be
 * configured with an LPI table address once, GICv3 systems with many CPUs may
 * end up reserving a lot of different regions after a kexec for their LPI
 * tables (one per CPU), as we are forced to reuse the same memory after kexec
 * (and thus reserve it persistently with EFI beforehand).
 */
#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif

/*
 * Memory regions marked with the MEMBLOCK_NOMAP flag (for example, memory
 * of the EFI_UNUSABLE_MEMORY type) may divide a contiguous memory block into
 * multiple parts. As a result, the number of memory regions can be large.
 */
#ifdef CONFIG_EFI
#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)
#endif

#endif /* __ASM_MEMORY_H */