/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool arch_thp_swp_supported(void)
{
	return !system_supports_mte();
}
#define arch_thp_swp_supported arch_thp_swp_supported

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys)	(phys)
#endif

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define __pte_clear(mm, addr, ptep) \
				__set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
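/*
 * Illustrative sketch, not part of this header's API: the conversion helpers
 * above are written so that a pfn/prot pair round-trips through a pte,
 * whether or not the 52-bit physical address packing is in use. Assuming a
 * valid pfn and a prot value such as PAGE_KERNEL:
 *
 *	pte_t pte = pfn_pte(pfn, prot);
 *
 *	pte_pfn(pte)  == pfn;			// pfn recovered from the entry
 *	pte_page(pte) == pfn_to_page(pfn);	// same struct page either way
 */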
/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * not set), must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}
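/*
 * Illustrative sketch, not part of this header's API: with hardware DBM, a
 * hardware-dirty pte is simply one that is writable and not read-only, so
 * write-protecting it must not lose the dirty state. pte_wrprotect() above
 * therefore latches it into the software PTE_DIRTY bit first:
 *
 *	pte_t pte = ...;		// PTE_WRITE set, PTE_RDONLY clear
 *
 *	pte_hw_dirty(pte);		// true
 *	pte = pte_wrprotect(pte);	// sets PTE_DIRTY and PTE_RDONLY, clears PTE_WRITE
 *	pte_dirty(pte);			// still true, now via pte_sw_dirty()
 */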
static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(u64 old, u64 new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        | 1           0          0
 *   0      1        | 1           1          0
 *   1      0        | 1           0          1
 *   1      1        | 0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */

static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted() returns false for exec only mappings, they
	 * don't expose tags (instruction fetches don't check tags).
	 */
	if (system_supports_mte() && pte_access_permitted(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

static inline void __set_ptes(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	__sync_cache_and_tags(pte, nr);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, 1);
	}
}
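/*
 * Illustrative sketch, not part of this header's API: __set_ptes() assumes a
 * physically contiguous range, advancing the pfn by one for each entry it
 * writes. Mapping "nr" pages starting at "pfn" therefore looks roughly like:
 *
 *	pte_t pte = pfn_pte(pfn, prot);		// "pfn" and "prot" are placeholders
 *
 *	__set_ptes(mm, addr, ptep, pte, nr);	// ptep[i] ends up mapping pfn + i
 */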
/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)	(!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

static inline void __set_pte_at(struct mm_struct *mm,
				unsigned long __always_unused addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	__sync_cache_and_tags(pte, nr);
	__check_safe_pte_update(mm, ptep, pte);
	__set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
						PMD_SIZE >> PAGE_SHIFT);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
						PUD_SIZE >> PAGE_SHIFT);
}

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
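/*
 * Illustrative sketch, not part of this header's API: __pgprot_modify()
 * clears "mask" and then ORs in "bits". The pgprot_*() helpers below use it
 * to swap only the MAIR attribute index (plus the XN bits) of an existing
 * prot value, e.g.:
 *
 *	pgprot_t wc = pgprot_writecombine(PAGE_KERNEL);	// Normal-NC, PXN/UXN set
 */
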
#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded  mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset	pud_offset

static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded  mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}
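/*
 * Illustrative sketch, not part of this header's API: when a level is folded
 * at runtime (52-bit VAs unavailable), the *_offset() helpers above return a
 * pointer back into the table one level up, so a generic walk is unchanged:
 *
 *	pgd_t *pgdp = pgd_offset(mm, addr);
 *	p4d_t *p4dp = p4d_offset(pgdp, addr);	// stays within the pgd page if the
 *						// p4d level is folded
 *	pud_t *pudp = pud_offset(p4dp, addr);	// likewise when the pud level is folded
 */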
/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)			NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

extern int __ptep_set_access_flags(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep,
				   pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
					pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	int young = __ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		__ptep_get_and_clear(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep,
					      unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear(mm, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep,
					 pte_t pte)
{
	pte_t old_pte;

	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}
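/*
 * Illustrative note, not part of this header's API: helpers such as
 * __ptep_test_and_clear_young() and ___ptep_set_wrprotect() above share a
 * lockless read-modify-write pattern (in pseudocode) so that a concurrent
 * hardware DBM/AF update of the same pte is never lost:
 *
 *	do {
 *		old = pte;
 *		pte = modify(old);		// e.g. pte_mkold() or pte_wrprotect()
 *		pte = cmpxchg_relaxed(ptep, old, pte);
 *	} while (pte != old);			// retry if the pte changed under us
 */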
/*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
}

static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
				    pte_t *ptep, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
		__ptep_set_wrprotect(mm, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		remember PG_anon_exclusive
 *	bits 3-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		5
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
	if (system_supports_mte())
		mte_restore_tags(entry, &folio->page);
}

#endif /* CONFIG_ARM64_MTE */
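/*
 * Illustrative sketch, not part of this header's API: the swap encoding
 * described above means a swap pte round-trips through the generic
 * swp_entry_t conversions for any in-range type/offset:
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *
 *	pte_present(pte) == false;			// bits 0-1 and bit 58 stay clear
 *	__swp_type(__pte_to_swp_entry(pte)) == type;
 *	__swp_offset(__pte_to_swp_entry(pte)) == offset;
 */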
/*
 * On AArch64, the cache coherency is handled via the __set_ptes() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);
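/*
 * Illustrative sketch, not part of this header's API: generic code (e.g.
 * mprotect's change_pte_range()) uses the pair above as a transaction around
 * a protection change, roughly:
 *
 *	pte_t old = ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new = pte_modify(old, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, old, new);
 */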
#ifdef CONFIG_ARM64_CONTPTE

/*
 * The contpte APIs are used to transparently manage the contiguous bit in ptes
 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
 * a private implementation detail of the public ptep API (see below).
 */
extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr);
extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full);
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full);
extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty);

static __always_inline void contpte_try_fold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	/*
	 * Only bother trying if both the virtual and physical addresses are
	 * aligned and correspond to the last entry in a contig range. The core
	 * code mostly modifies ranges from low to high, so this is likely to be
	 * the last modification in the contig range, so a good time to fold.
	 * We can't fold special mappings, because there is no associated folio.
	 */

	const unsigned long contmask = CONT_PTES - 1;
	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;

	if (unlikely(valign)) {
		bool palign = (pte_pfn(pte) & contmask) == contmask;

		if (unlikely(palign &&
		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
			__contpte_try_fold(mm, addr, ptep, pte);
	}
}

static __always_inline void contpte_try_unfold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	if (unlikely(pte_valid_cont(pte)))
		__contpte_try_unfold(mm, addr, ptep, pte);
}

#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_valid_cont(pte))
		return 1;

	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
}

/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_ptep_clear_flush_young(vma, addr, ptep);
}

#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes so we only need to check contig bit as condition for
		 * unfold, and we can remove the contig bit from the pte we read
		 * to avoid re-reading. This speeds up fork() which is sensitive
		 * for order-0 folios. Equivalent to contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get				__ptep_get
#define set_pte					__set_pte
#define set_ptes				__set_ptes
#define pte_clear				__pte_clear
#define clear_full_ptes				__clear_full_ptes
#define get_and_clear_full_ptes			__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear			__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young		__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young			__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect			__ptep_set_wrprotect
#define wrprotect_ptes				__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags			__ptep_set_access_flags

#endif /* CONFIG_ARM64_CONTPTE */

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */