/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
extern struct ctlreg s390_invalid_asce;

enum {
        PG_DIRECT_MAP_4K = 0,
        PG_DIRECT_MAP_1M,
        PG_DIRECT_MAP_2G,
        PG_DIRECT_MAP_MAX
};

extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);

static inline void update_page_count(int level, long count)
{
        if (IS_ENABLED(CONFIG_PROC_FS))
                atomic_long_add(count, &direct_pages_count[level]);
}

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)             do { } while (0)
#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)         do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + \
         (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
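
/*
 * Depending on the machine, more than one zero page may be set up at
 * boot, in which case zero_page_mask selects among them by virtual
 * address, so that differently "colored" read-only zero mappings hit
 * different physical pages. With a single zero page the mask is simply 0.
 */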

/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
        pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
        pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE    ((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR   MODULES_VADDR
#define MODULES_END     MODULES_END
#define MODULES_LEN     (1UL << 31)

static inline int is_module_addr(void *addr)
{
        BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
        if (addr < (void *)MODULES_VADDR)
                return 0;
        if (addr > (void *)MODULES_END)
                return 0;
        return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                        PFRA                      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                            |  TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                            |  TF TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                         |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC    0x100           /* HW no-execute bit */
#define _PAGE_PROTECT   0x200           /* HW read-only bit */
#define _PAGE_INVALID   0x400           /* HW invalid bit */
#define _PAGE_LARGE     0x800           /* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT   0x001           /* SW pte present bit */
#define _PAGE_YOUNG     0x004           /* SW pte young bit */
#define _PAGE_DIRTY     0x008           /* SW pte dirty bit */
#define _PAGE_READ      0x010           /* SW pte read bit */
#define _PAGE_WRITE     0x020           /* SW pte write bit */
#define _PAGE_SPECIAL   0x040           /* SW associated with special page */
#define _PAGE_UNUSED    0x080           /* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002          /* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SW_BITS   0xffUL          /* All SW bits */

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE /* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK          (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
                                 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
 * HW bit and all SW bits.
 */
#define _PAGE_RDP_MASK          ~(_PAGE_PROTECT | _PAGE_SW_BITS)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *                              842100000000
 *                              000084210000
 *                              000000008421
 *                              .IR.uswrdy.p
 * empty                        .10.00000000
 * swap                         .11..ttttt.0
 * prot-none, clean, old        .11.xx0000.1
 * prot-none, clean, young      .11.xx0001.1
 * prot-none, dirty, old        .11.xx0010.1
 * prot-none, dirty, young      .11.xx0011.1
 * read-only, clean, old        .11.xx0100.1
 * read-only, clean, young      .01.xx0101.1
 * read-only, dirty, old        .11.xx0110.1
 * read-only, dirty, young      .01.xx0111.1
 * read-write, clean, old       .11.xx1100.1
 * read-write, clean, young     .01.xx1101.1
 * read-write, dirty, old       .10.xx1110.1
 * read-write, dirty, young     .00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *          u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
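
/*
 * Example: a present, read-write, dirty, young pte has the SW bits
 * _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_YOUNG | _PAGE_DIRTY
 * set and both HW bits _PAGE_INVALID and _PAGE_PROTECT cleared
 * (".00.xx1111.1" above), so the CPU can read and write it without
 * faulting; write-protecting it only sets _PAGE_PROTECT and clears
 * _PAGE_WRITE.
 */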

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event */
#define _ASCE_REAL_SPACE        0x20    /* real space control */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT   0x200   /* region protection bit */
#define _REGION_ENTRY_NOEXEC    0x100   /* region no-execute bit */
#define _REGION_ENTRY_OFFSET    0xc0    /* region table offset */
#define _REGION_ENTRY_INVALID   0x20    /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region table type mask */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type */
#define _REGION_ENTRY_LENGTH    0x03    /* region table length */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY    0x2000  /* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG    0x1000  /* SW region young bit */
#define _REGION3_ENTRY_LARGE    0x0400  /* RTTE-format control, large page */
#define _REGION3_ENTRY_READ     0x0002  /* SW region read bit */
#define _REGION3_ENTRY_WRITE    0x0001  /* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS      0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS                     0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS            0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE      0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL  /* large page address */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL        /* page table origin */
#define _SEGMENT_ENTRY_PROTECT  0x200           /* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC   0x100           /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID  0x20            /* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c           /* segment table type mask */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY    0x2000  /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG    0x1000  /* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE    0x0400  /* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE    0x0002  /* SW segment write bit */
#define _SEGMENT_ENTRY_READ     0x0001  /* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES   2048    /* number of region/segment table entries */
#define _PAGE_ENTRIES   256     /* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT  53
#define _REGION2_SHIFT  42
#define _REGION3_SHIFT  31
#define _SEGMENT_SHIFT  20

#define _REGION1_INDEX  (0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX  (0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX  (0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX  (0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX     (0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE   (1UL << _REGION1_SHIFT)
#define _REGION2_SIZE   (1UL << _REGION2_SHIFT)
#define _REGION3_SIZE   (1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE   (1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK   (~(_REGION1_SIZE - 1))
#define _REGION2_MASK   (~(_REGION2_SIZE - 1))
#define _REGION3_MASK   (~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK   (~(_SEGMENT_SIZE - 1))
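
/*
 * The resulting granularities: a pte maps a 4KB page, a segment entry
 * 1MB (2^20), a region third entry 2GB (2^31), a region second entry
 * 4TB (2^42) and a region first entry 8PB (2^53). Each crst table has
 * 2048 entries, i.e. each upper level translates 11 address bits.
 */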

#define PMD_SHIFT       _SEGMENT_SHIFT
#define PUD_SHIFT       _REGION3_SHIFT
#define P4D_SHIFT       _REGION2_SHIFT
#define PGDIR_SHIFT     _REGION1_SHIFT

#define PMD_SIZE        _SEGMENT_SIZE
#define PUD_SIZE        _REGION3_SIZE
#define P4D_SIZE        _REGION2_SIZE
#define PGDIR_SIZE      _REGION1_SIZE

#define PMD_MASK        _SEGMENT_MASK
#define PUD_MASK        _REGION3_MASK
#define P4D_MASK        _REGION2_MASK
#define PGDIR_MASK      _REGION1_MASK

#define PTRS_PER_PTE    _PAGE_ENTRIES
#define PTRS_PER_PMD    _CRST_ENTRIES
#define PTRS_PER_PUD    _CRST_ENTRIES
#define PTRS_PER_P4D    _CRST_ENTRIES
#define PTRS_PER_PGD    _CRST_ENTRIES

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *                              dy..R...I...wr
 * prot-none, clean, old        00..1...1...00
 * prot-none, clean, young      01..1...1...00
 * prot-none, dirty, old        10..1...1...00
 * prot-none, dirty, young      11..1...1...00
 * read-only, clean, old        00..1...1...01
 * read-only, clean, young      01..1...0...01
 * read-only, dirty, old        10..1...1...01
 * read-only, dirty, young      11..1...0...01
 * read-write, clean, old       00..1...1...11
 * read-write, clean, young     01..1...0...11
 * read-write, dirty, old       10..0...1...11
 * read-write, dirty, young     11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS  0xf000000000000000UL
#define PGSTE_FP_BIT    0x0800000000000000UL
#define PGSTE_PCL_BIT   0x0080000000000000UL
#define PGSTE_HR_BIT    0x0040000000000000UL
#define PGSTE_HC_BIT    0x0020000000000000UL
#define PGSTE_GR_BIT    0x0004000000000000UL
#define PGSTE_GC_BIT    0x0002000000000000UL
#define PGSTE_UC_BIT    0x0000800000000000UL    /* user dirty (migration) */
#define PGSTE_IN_BIT    0x0000400000000000UL    /* IPTE notify bit */
#define PGSTE_VSIE_BIT  0x0000200000000000UL    /* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO                 0x0000000080000000UL
#define _PGSTE_GPS_NODAT                0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK           0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE         0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED         0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE   0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE       _PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO         __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX         __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW         __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
                                 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE    __pgprot(_SEGMENT_ENTRY_INVALID | \
                                 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO      __pgprot(_SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX      __pgprot(_SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW      __pgprot(_SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX     __pgprot(_SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL  __pgprot(_SEGMENT_ENTRY | \
                                 _SEGMENT_ENTRY_LARGE | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE | \
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_DIRTY | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
                                 _SEGMENT_ENTRY_LARGE | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
                                 _SEGMENT_ENTRY_LARGE | \
                                 _SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE | \
                                 _SEGMENT_ENTRY_YOUNG | \
                                 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL  __pgprot(_REGION_ENTRY_TYPE_R3 | \
                                 _REGION3_ENTRY_LARGE | \
                                 _REGION3_ENTRY_READ | \
                                 _REGION3_ENTRY_WRITE | \
                                 _REGION3_ENTRY_YOUNG | \
                                 _REGION3_ENTRY_DIRTY | \
                                 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
                                   _REGION3_ENTRY_LARGE | \
                                   _REGION3_ENTRY_READ | \
                                   _REGION3_ENTRY_YOUNG | \
                                   _REGION_ENTRY_PROTECT | \
                                   _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \
                                 _REGION3_ENTRY_LARGE | \
                                 _REGION3_ENTRY_READ | \
                                 _REGION3_ENTRY_WRITE | \
                                 _REGION3_ENTRY_YOUNG | \
                                 _REGION3_ENTRY_DIRTY)
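
/*
 * s390 folds the upper page table levels dynamically: a process starts
 * out with a three-level (region third) asce and upgrades to four or
 * five levels on demand. The mm_*_folded() helpers below report which
 * levels actually exist for a given mm, based on its asce limit; e.g.
 * with the initial asce_limit of _REGION2_SIZE both the p4d and the pud
 * level are folded and only pgd, pmd and pte are backed by real tables.
 */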

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.has_pgste))
                return 1;
#endif
        return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(atomic_read(&mm->context.protected_count)))
                return 1;
#endif
        return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.alloc_pgste))
                return 1;
#endif
        return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
        return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
        return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
        return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
        return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
        return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (mm->context.uses_skeys)
                return 1;
#endif
        return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
        union register_pair r1 = { .even = old, .odd = new, };
        unsigned long address = (unsigned long)ptr | 1;

        asm volatile(
                "       csp     %[r1],%[address]"
                : [r1] "+&d" (r1.pair), "+m" (*ptr)
                : [address] "d" (address)
                : "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
        union register_pair r1 = { .even = old, .odd = new, };
        unsigned long address = (unsigned long)ptr | 1;

        asm volatile(
                "       cspg    %[r1],%[address]"
                : [r1] "+&d" (r1.pair), "+m" (*ptr)
                : [address] "d" (address)
                : "cc");
}
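
/*
 * csp() and cspg() are the 32-bit and 64-bit COMPARE AND SWAP AND PURGE
 * variants: if *ptr still contains the old value it is replaced by the
 * new one and the TLB entries formed from it are purged on all CPUs in
 * the configuration; e.g. __pmdp_csp() below uses csp() to set the
 * invalid bit in a segment table entry.
 */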

#define CRDTE_DTT_PAGE          0x00UL
#define CRDTE_DTT_SEGMENT       0x10UL
#define CRDTE_DTT_REGION3       0x14UL
#define CRDTE_DTT_REGION2       0x18UL
#define CRDTE_DTT_REGION1       0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
                         unsigned long *table, unsigned long dtt,
                         unsigned long address, unsigned long asce)
{
        union register_pair r1 = { .even = old, .odd = new, };
        union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

        asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
                     : [r1] "+&d" (r1.pair)
                     : [r2] "d" (r2.pair), [asce] "a" (asce)
                     : "memory", "cc");
}
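
/*
 * crdte() is COMPARE AND REPLACE DAT TABLE ENTRY (opcode 0xb98f): like
 * cspg() it atomically exchanges a table entry, but it additionally
 * takes the table type (dtt), the virtual address and the asce, which
 * allows the TLB to be purged more selectively than the unconditional
 * purge done by cspg().
 */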

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
        return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
        if (pgd_folded(pgd))
                return 1;
        return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
        if (pgd_folded(pgd))
                return 0;
        return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
                return 0;
        return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
        return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
        if (p4d_folded(p4d))
                return 1;
        return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
        if (p4d_folded(p4d))
                return 0;
        return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
        return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
        if (pud_folded(pud))
                return 1;
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        if (pud_folded(pud))
                return 0;
        return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
                return 0;
        return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
        if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
                return 1;
        return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
        unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

        if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
                return 1;
        if (type < _REGION_ENTRY_TYPE_R3)
                return 0;
        return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
        unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

        if (type > _REGION_ENTRY_TYPE_R2)
                return 1;
        if (type < _REGION_ENTRY_TYPE_R2)
                return 0;
        return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
        return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
        /* Bit pattern: (pte & 0x001) == 0x001 */
        return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
        /* Bit pattern: pte == 0x400 */
        return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
        /* Bit pattern: (pte & 0x201) == 0x200 */
        return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
                == _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
        return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
        return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
        /* pmd_leaf(pmd) implies pmd_present(pmd) */
        return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
        return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
        return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
        unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

        if (pte_write(pte))
                pte_flags |= pgprot_val(PAGE_KERNEL);
        else
                pte_flags |= pgprot_val(PAGE_KERNEL_RO);
        pte_flags |= pte_val(pte) & mio_wb_bit_mask;

        return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
                set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
        pte = set_pte_bit(pte, newprot);
        /*
         * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
         * has the invalid bit set, clear it again for readable, young pages
         */
        if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
                pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
        /*
         * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
         * protection bit set, clear it again for writable, dirty pages
         */
        if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
                pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
        return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
        if (pte_val(pte) & _PAGE_DIRTY)
                pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
        return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
        if (pte_val(pte) & _PAGE_WRITE)
                pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
        return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
        if (pte_val(pte) & _PAGE_READ)
                pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

#define IPTE_GLOBAL     0
#define IPTE_LOCAL      1

#define IPTE_NODAT      0x400
#define IPTE_GUEST_ASCE 0x800

static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
                                       unsigned long opt, unsigned long asce,
                                       int local)
{
        unsigned long pto;

        pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
        asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
                     : "+m" (*ptep)
                     : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
                       [asce] "a" (asce), [m4] "i" (local));
}

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
                                        unsigned long opt, unsigned long asce,
                                        int local)
{
        unsigned long pto = __pa(ptep);

        if (__builtin_constant_p(opt) && opt == 0) {
                /* Invalidation + TLB flush for the pte */
                asm volatile(
                        "       ipte    %[r1],%[r2],0,%[m4]"
                        : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
                          [m4] "i" (local));
                return;
        }

        /* Invalidate ptes with options + TLB flush of the ptes */
        opt = opt | (asce & _ASCE_ORIGIN);
        asm volatile(
                "       ipte    %[r1],%[r2],%[r3],%[m4]"
                : [r2] "+a" (address), [r3] "+a" (opt)
                : [r1] "a" (pto), [m4] "i" (local) : "memory");
}
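
/*
 * Invalidate a range of ptes plus their TLB entries. The CPU may
 * perform the invalidation only partially, updating the address and
 * the remaining count in the registers; the loop below therefore
 * retries until the count indicates completion (reads as 255).
 */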
static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
                                              pte_t *ptep, int local)
{
        unsigned long pto = __pa(ptep);

        /* Invalidate a range of ptes + TLB flush of the ptes */
        do {
                asm volatile(
                        "       ipte %[r1],%[r2],%[r3],%[m4]"
                        : [r2] "+a" (address), [r3] "+a" (nr)
                        : [r1] "a" (pto), [m4] "i" (local) : "memory");
        } while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
        return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
{
        pte_t res;

        res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
        /* At this point the reference through the mapping is still present */
        if (mm_is_protected(mm) && pte_present(res))
                uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
        return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
                             pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep)
{
        pte_t res;

        res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
        /* At this point the reference through the mapping is still present */
        if (mm_is_protected(vma->vm_mm) && pte_present(res))
                uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
        return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr,
                                            pte_t *ptep, int full)
{
        pte_t res;

        if (full) {
                res = *ptep;
                set_pte(ptep, __pte(_PAGE_INVALID));
        } else {
                res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
        }
        /* Nothing to do */
        if (!mm_is_protected(mm) || !pte_present(res))
                return res;
        /*
         * At this point the reference through the mapping is still present.
         * The notifier should have destroyed all protected vCPUs at this
         * point, so the destroy should be successful.
         */
        if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
                return res;
        /*
         * If something went wrong and the page could not be destroyed, or
         * if this is not a mm teardown, the slower export is used as
         * fallback instead.
         */
        uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
        return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_write(pte))
                ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

/*
 * Check if PTEs only differ in _PAGE_PROTECT HW bit, but also allow SW PTE
 * bits in the comparison. Those might change e.g. because of dirty and young
 * tracking.
 */
static inline int pte_allow_rdp(pte_t old, pte_t new)
{
        /*
         * Only allow changes from RO to RW
         */
        if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
                return 0;

        return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
}
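
/*
 * In other words, only a RO->RW transition qualifies for RDP: the old
 * pte must have _PAGE_PROTECT set, the new one must not, and apart from
 * the SW bits (which dirty/young tracking may change) everything else,
 * in particular the page frame address, must stay the same.
 */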

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
                                                unsigned long address,
                                                pte_t *ptep)
{
        /*
         * RDP might not have propagated the PTE protection reset to all CPUs,
         * so there could be spurious TLB protection faults.
         * NOTE: This will also be called when a racing pagetable update on
         * another thread already installed the correct PTE. Both cases cannot
         * really be distinguished.
         * Therefore, only do the local TLB flush when RDP can be used, and the
         * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
         * A local RDP can be used to do the flush.
         */
        if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
                __ptep_rdp(address, ptep, 0, 0, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault

void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                         pte_t new);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long addr, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        if (pte_same(*ptep, entry))
                return 0;
        if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
                ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
        else
                ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
        return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
                 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
                    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
                    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
                            pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                               unsigned char key, unsigned char *oldkey,
                               bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
                   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
                       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine     pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough     pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

#define PFN_PTE_SHIFT           PAGE_SHIFT

/*
 * Set multiple PTEs to consecutive pages with a single call. All PTEs
 * are within the same folio, PMD and VMA.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t entry, unsigned int nr)
{
        if (pte_present(entry))
                entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
        if (mm_has_pgste(mm)) {
                for (;;) {
                        ptep_set_pte_at(mm, addr, ptep, entry);
                        if (--nr == 0)
                                break;
                        ptep++;
                        entry = __pte(pte_val(entry) + PAGE_SIZE);
                        addr += PAGE_SIZE;
                }
        } else {
                for (;;) {
                        set_pte(ptep, entry);
                        if (--nr == 0)
                                break;
                        ptep++;
                        entry = __pte(pte_val(entry) + PAGE_SIZE);
                }
        }
}
#define set_ptes set_ptes
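
/*
 * e.g. mapping a 4-page folio at addr:
 *	set_ptes(mm, addr, ptep, mk_pte(&folio->page, prot), 4);
 * writes four consecutive ptes, advancing the page frame address by
 * PAGE_SIZE for each one.
 */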

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;

        __pte = __pte(physpage | pgprot_val(pgprot));
        if (!MACHINE_HAS_NX)
                __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
        return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);
        pte_t __pte = mk_pte_phys(physpage, pgprot);

        if (pte_write(__pte) && PageDirty(page))
                __pte = pte_mkdirty(__pte);
        return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
        unsigned long origin_mask;

        origin_mask = _SEGMENT_ENTRY_ORIGIN;
        if (pmd_leaf(pmd))
                origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        if (pud_leaf(pud))
                origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
        return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
        unsigned long rste;
        unsigned int shift;

        /* Get the first entry of the top level table */
        rste = pgd_val(*pgd);
        /* Pick up the shift from the table type of the first entry */
        shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
        return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
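
/*
 * The shift computation above maps the table type bits to the index
 * shift of that level: region first (TT 11) yields 3 * 11 + 20 = 53,
 * region second 42, region third 31 and segment table (TT 00) 20,
 * matching _REGION1_SHIFT.._SEGMENT_SHIFT.
 */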

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
                return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
        return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
        return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
        if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
                return (pud_t *) p4d_deref(p4d) + pud_index(address);
        return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
        return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
                return (pmd_t *) pud_deref(pud) + pmd_index(address);
        return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
        return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset
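
/*
 * A complete walk to the pte level thus looks the same no matter how
 * many levels are folded (sketch, error checking omitted):
 *	pgdp = pgd_offset(mm, addr);
 *	p4dp = p4d_offset(pgdp, addr);
 *	pudp = pud_offset(p4dp, addr);
 *	pmdp = pmd_offset(pudp, addr);
 *	ptep = pte_offset_map(pmdp, addr);
 * Folded levels simply hand back the pointer they were given.
 */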

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
        return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)    mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
        if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
                pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
        return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
        if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
                pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
        return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
        pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
        return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
        pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
        if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
                pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
        return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
        pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
        return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
        pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
        if (pud_val(pud) & _REGION3_ENTRY_WRITE)
                pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
        return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        /*
         * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
         * (see __Pxxx / __Sxxx). Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
                return pgprot_val(SEGMENT_RO);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
                return pgprot_val(SEGMENT_RX);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
                return pgprot_val(SEGMENT_RW);
        return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
        if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
                pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long mask;

        mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
        mask |= _SEGMENT_ENTRY_DIRTY;
        mask |= _SEGMENT_ENTRY_YOUNG;
        mask |= _SEGMENT_ENTRY_LARGE;
        mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
        pmd = __pmd(pmd_val(pmd) & mask);
        pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
        if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
        if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
                pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
        return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
        return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
        csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
            pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL     0
#define IDTE_LOCAL      1

#define IDTE_PTOA       0x0800
#define IDTE_NODAT      0x1000
#define IDTE_GUEST_ASCE 0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
                                        unsigned long opt, unsigned long asce,
                                        int local)
{
        unsigned long sto;

        sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        "       idte    %[r1],0,%[r2],%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
                          [m4] "i" (local)
                        : "cc");
        } else {
                /* flush with guest asce */
                asm volatile(
                        "       idte    %[r1],%[r3],%[r2],%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc");
        }
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
                                        unsigned long opt, unsigned long asce,
                                        int local)
{
        unsigned long r3o;

        r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
        r3o |= _ASCE_TYPE_REGION3;
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        "       idte    %[r1],0,%[r2],%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
                          [m4] "i" (local)
                        : "cc");
        } else {
                /* flush with guest asce */
                asm volatile(
                        "       idte    %[r1],%[r3],%[r2],%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc");
        }
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);

        entry = pmd_mkyoung(entry);
        if (dirty)
                entry = pmd_mkdirty(entry);
        if (pmd_val(*pmdp) == pmd_val(entry))
                return 0;
        pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
        return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
        return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long addr, pmd_t *pmdp)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);
        return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
{
        if (!MACHINE_HAS_NX)
                entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
        set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
{
        return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                                 unsigned long addr,
                                                 pmd_t *pmdp, int full)
{
        if (full) {
                pmd_t pmd = *pmdp;

                set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
                return pmd;
        }
        return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

        return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        if (pmd_write(pmd))
                pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)    mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
        return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
 * as invalid.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * |                     offset                       |E11XX|type |S0|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 *
 * Bits 0-51 store the offset.
 * Bit 52 (E) is used to remember PG_anon_exclusive.
 * Bits 57-61 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 55 and 56 (X) are unused.
 */

#define __SWP_OFFSET_MASK       ((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT      12
#define __SWP_TYPE_MASK         ((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT        2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        unsigned long pteval;

        pteval = _PAGE_INVALID | _PAGE_PROTECT;
        pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
        pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
        return __pte(pteval);
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
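
/*
 * Note that the bit numbers above follow the s390 convention (bit 0 is
 * the leftmost bit), e.g. __swp_entry(1, 2) yields a pte with the value
 * (2UL << 12) | (1UL << 2) | _PAGE_INVALID | _PAGE_PROTECT: offset 2 in
 * bits 0-51, type 1 in bits 57-61, and the swap bit pattern
 * (pte & 0x201) == 0x200.
 */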

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
extern void vmem_unmap_4k_page(unsigned long addr);
extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
        ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */