/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x400

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
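/*
 * Illustrative sketch only (not compiled): how the VMALLOC_START
 * rounding above behaves.  Assuming VMALLOC_OFFSET is a power of two,
 * the expression rounds end_iomem up to the next VMALLOC_OFFSET
 * boundary, which is what leaves the "hole" described above.  The
 * helper name and the numbers below are hypothetical.
 */
#if 0
static unsigned long example_vmalloc_start(unsigned long iomem_end,
					   unsigned long offset)
{
	/* e.g. iomem_end = 0x8060000, offset = 0x800000 (8MB):
	 * 0x8060000 + 0x800000 = 0x8860000, masked down to 0x8800000,
	 * so vmalloc space starts past a ~7.6MB unmapped gap.
	 */
	return (iomem_end + offset) & ~(offset - 1);
}
#endif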
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)	(p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_pfn(pmd)	(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)	phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_RW) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}
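/*
 * Illustrative sketch only (not compiled): because pte_present() tests
 * _PAGE_PRESENT | _PAGE_PROTNONE, a PAGE_NONE mapping still counts as
 * present even though every access test above fails for it.  The
 * function name below is hypothetical.
 */
#if 0
static void example_prot_none(pte_t pte)
{
	/* Build a PROT_NONE pte: _PAGE_PROTNONE | _PAGE_ACCESSED,
	 * but no _PAGE_PRESENT, _PAGE_USER or _PAGE_RW.
	 */
	pte_set_val(pte, (phys_t) 0, PAGE_NONE);

	BUG_ON(!pte_present(pte));	/* present via _PAGE_PROTNONE */
	BUG_ON(pte_read(pte));		/* ...but not readable */
	BUG_ON(pte_write(pte));		/* ...nor writable */
}
#endif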
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	if (likely(pte_get_bits(pte, _PAGE_RW)))
		pte_clear_bits(pte, _PAGE_RW);
	else
		return pte;
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkread(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
		return pte;
	pte_set_bits(pte, _PAGE_USER);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
		return pte;
	pte_set_bits(pte, _PAGE_RW);
	return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return pte;
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}

#define PFN_PTE_SHIFT	PAGE_SHIFT

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * that the given pmd entry refers to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma, address, ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 * _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
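/*
 * Illustrative sketch only (not compiled): a round trip through the
 * swap encoding above.  For type = 3 and offset = 0x1234, the entry's
 * value is (3 << 5) | (0x1234 << 11), matching the bit layout in the
 * comment: bits 0-4 stay clear, bits 5-9 hold the type, bit 10 is
 * reserved for _PAGE_SWP_EXCLUSIVE, and the offset starts at bit 11.
 * The function name is hypothetical.
 */
#if 0
static void example_swp_round_trip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
	/* The resulting PTE is not present, as a swap PTE requires. */
	BUG_ON(pte_present(__swp_entry_to_pte(entry)));
}
#endif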
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif /* __UM_PGTABLE_H */