/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
			 VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
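/*
 * Illustrative sketch only: because vmemmap is pre-biased by the RAM
 * base PFN, the generic pfn_to_page()/page_to_pfn() helpers reduce to
 * plain pointer arithmetic under CONFIG_SPARSEMEM_VMEMMAP:
 *
 *	struct page *page = vmemmap + pfn;
 *	unsigned long pfn = page - vmemmap;
 */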
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define VA_USER_SV39	(UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48	(UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57	(UL(1) << (VA_BITS_SV57 - 1))

#define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
#define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
		__a;							\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
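/*
 * Illustrative sketch only: on an XIP kernel, writable data linked at
 * its flash (CONFIG_XIP_PHYS_ADDR) address is accessed through its copy
 * in RAM, so a pointer to a kernel object is remapped roughly as:
 *
 *	uintptr_t ram_addr = XIP_FIXUP(&some_kernel_var);
 *
 * where "some_kernel_var" is a placeholder name, not a real symbol.
 */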
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ		\
				 | _PAGE_WRITE		\
				 | _PAGE_PRESENT	\
				 | _PAGE_ACCESSED	\
				 | _PAGE_DIRTY		\
				 | _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];
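/*
 * Illustrative sketch only: these protection sets plug straight into
 * the PTE constructors defined later in this file, e.g. a global
 * read-only kernel mapping for a given PFN:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL_READ);
 */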
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <asm/cpufeature.h>

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */
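/*
 * Illustrative sketch only: for a NAPOT mapping of 2^order base pages,
 * pte_mknapot() rewrites the low "order" PFN bits to the pattern
 * 0b10...0 (a one followed by order-1 zeros) and sets _PAGE_NAPOT,
 * e.g. a 64KB contiguous range built from 4KB pages:
 *
 *	pte = pte_mknapot(pfn_pte(pfn, prot), 4);
 */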
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?			\
				 napot_cont_size(napot_cont_order(pte)) : \
				 PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
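/*
 * Illustrative sketch only: pte_modify() keeps the PFN and the bits in
 * _PAGE_CHG_MASK while swapping in a new protection set, e.g. the
 * downgrade an mprotect(PROT_READ) would apply:
 *
 *	pte = pte_modify(pte, PAGE_READ);
 */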
/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT		_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}
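/*
 * Illustrative sketch only: because consecutive PTEs differ only in
 * their PFN field, mapping a physically contiguous range of nr pages
 * is a single call; set_ptes() advances the PFN internally:
 *
 *	set_ptes(mm, addr, ptep, pfn_pte(pfn, prot), nr);
 */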
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit; it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
		pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
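/*
 * Illustrative sketch only: the pgprot_writecombine() helper above is
 * what a driver's mmap handler typically applies before remapping MMIO
 * or framebuffer memory into userspace:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 */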
#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
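/*
 * Illustrative sketch only: the atomic xchg in pmdp_establish() means a
 * concurrent hardware walk sees either the old or the new PMD, never a
 * torn one; the generic pmdp_invalidate() uses it roughly as:
 *
 *	old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
 */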
/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t)			\
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) |		\
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)	((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)	__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
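/*
 * Illustrative sketch only: with the layout above, a swap PTE packs as
 * ((type & 0x1f) << 7) | (offset << 12), keeping _PAGE_PRESENT (bit 0)
 * clear so the entry can never be mistaken for a valid mapping:
 *
 *	pte_t pte = __swp_entry_to_pte(__swp_entry(type, offset));
 */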
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000 (~2.5GB) for RV32.
 * -      0x4000000000 ( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000 ( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000 (  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63-48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63-57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */