/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)
#endif

#ifndef CONFIG_MMU

#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
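
/*
 * Worked example (illustrative, not part of the original header): with
 * lowmem ending at high_memory == 0xef800000 (8MB aligned), we get
 *
 *	VMALLOC_START = (0xef800000 + 0x00800000) & ~0x007fffff
 *		      = 0xf0000000
 *
 * i.e. VMALLOC_START lands on the next 8MB boundary, leaving the 8MB
 * "hole" described above between the direct-mapped memory and the
 * vmalloc area.
 */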

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture-dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)	\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
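
/*
 * Illustrative sketch (not part of the original header): the pgprot_*
 * modifiers above replace only the L_PTE_MT_* memory-type field and
 * leave the permission bits alone.  For example,
 *
 *	pgprot_t wc = pgprot_writecombine(PAGE_KERNEL);
 *
 * clears whatever memory type PAGE_KERNEL carried (under L_PTE_MT_MASK)
 * and substitutes L_PTE_MT_BUFFERABLE, while L_PTE_XN and the other
 * protection bits pass through unchanged.
 */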

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we can do execute protection, then read is implied
 *  3) write implies read permissions
 */

#ifndef __ASSEMBLY__

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pgdp_get(pgdp)		READ_ONCE(*pgdp)

#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

#define PFN_PTE_SHIFT		PAGE_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr,
	      pte_t *ptep, pte_t pteval, unsigned int nr);
#define set_ptes set_ptes

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <------------------- offset ------------------> E < type -> 0 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 *
 * This gives us up to 31 swap files and 64GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
						   ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	__pte((swp).val)
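
/*
 * Worked example (illustrative, not part of the original header):
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *
 * yields e.val = (3 << 2) | (0x1234 << 8) = 0x0012340c, from which
 * __swp_type(e) recovers 3 and __swp_offset(e) recovers 0x1234.  The
 * two low bits stay zero, matching the "0 0" in the diagram above, so
 * the resulting PTE is !pte_none() && !pte_present() as required.
 */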

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_isset(pte, L_PTE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_SWP_EXCLUSIVE));
}

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */