#ifndef _PPC64_PGTABLE_H
#define _PPC64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#endif /* __ASSEMBLY__ */

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD levels use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE	9
#define PMD_INDEX_SIZE	10
#define PGD_INDEX_SIZE	10

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

#define USER_PTRS_PER_PGD	(1024)
#define FIRST_USER_PGD_NR	0

#define EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			 PGD_INDEX_SIZE + PAGE_SHIFT)

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START	(0xD000000000000000)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	(VMALLOC_START + VALID_EA_BITS)

/*
 * Define the address range of the imalloc VM area.
 * (used for ioremap)
 */
#define IMALLOC_START	(ioremap_bot)
#define IMALLOC_VMADDR(x) ((unsigned long)(x))
#define IMALLOC_BASE	(0xE000000000000000)
#define IMALLOC_END	(IMALLOC_BASE + VALID_EA_BITS)

/*
 * Define the address range mapped virt <-> physical
 */
#define KRANGE_START	KERNELBASE
#define KRANGE_END	(KRANGE_START + VALID_EA_BITS)

/*
 * Define the user address range
 */
#define USER_START	(0UL)
#define USER_END	(USER_START + VALID_EA_BITS)
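
/*
 * Worked example of the geometry above, assuming the usual 4K pages
 * (PAGE_SHIFT == 12; the real value comes from asm/page.h):
 *
 *	PMD_SHIFT   = 12 + 12 - 3      = 21	(a PMD entry maps 2MB)
 *	PGDIR_SHIFT = 12 + 9 + 10      = 31	(a PGD entry maps 2GB)
 *	EADDR_SIZE  = 9 + 10 + 10 + 12 = 41	(41-bit effective addresses)
 *
 * so an effective address decomposes as
 *
 *	| 10-bit pgd index | 10-bit pmd index | 9-bit pte index | 12-bit offset |
 */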

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT	0x001UL	/* software: pte contains a translation */
#define _PAGE_USER	0x002UL	/* matches one of the PP bits */
#define _PAGE_RW	0x004UL	/* software: user write access allowed */
#define _PAGE_GUARDED	0x008UL	/* G: guarded (no speculative access) */
#define _PAGE_COHERENT	0x010UL	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020UL	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040UL	/* W: cache write-through */
#define _PAGE_DIRTY	0x080UL	/* C: page changed */
#define _PAGE_ACCESSED	0x100UL	/* R: page referenced */
#define _PAGE_HPTENOIX	0x200UL	/* software: pte HPTE slot unknown */
#define _PAGE_HASHPTE	0x400UL	/* software: pte has an associated HPTE */
#define _PAGE_EXEC	0x800UL	/* software: i-cache coherence required */
#define _PAGE_SECONDARY	0x8000UL /* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX	0x7000UL /* software: HPTE index within group */
/* Bits 0x7000 identify the index within an HPT group */
#define _PAGE_HPTEFLAGS (_PAGE_HASHPTE | _PAGE_HPTENOIX | _PAGE_SECONDARY | _PAGE_GROUP_IX)

/*
 * PAGE_MASK gives the right answer below, but only by accident:
 * it should really be preserving the high 48 bits and then
 * specifically preserving _PAGE_SECONDARY | _PAGE_GROUP_IX.
 */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot is defined in asm-ppc64/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permission implies read permission.
 * This is the closest we can get.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
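
/*
 * As a concrete example of how the protection encodings above compose,
 * PAGE_KERNEL expands to
 *
 *	_PAGE_BASE | _PAGE_WRENABLE
 *	  = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT | _PAGE_RW | _PAGE_DIRTY
 *	  = 0x001 | 0x100 | 0x010 | 0x004 | 0x080
 *	  = 0x195
 *
 * i.e. a coherent, writable translation with no _PAGE_USER bit, so it is
 * inaccessible from user mode.
 */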

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
#endif /* __ASSEMBLY__ */

/* shift to put page number into pte */
#define PTE_SHIFT	(16)

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte_phys takes a physical address as input
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte_phys(physpage,pgprot)					\
({									\
	pte_t pte;							\
	pte_val(pte) = (((physpage) << (PTE_SHIFT-PAGE_SHIFT)) | pgprot_val(pgprot)); \
	pte;								\
})

#define mk_pte(page,pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = ((unsigned long)((page) - mem_map) << PTE_SHIFT) | \
			pgprot_val(pgprot);				\
	pte;								\
})

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pagenr(x)		((unsigned long)(pte_val(x) >> PTE_SHIFT))
#define pte_page(x)		(mem_map + pte_pagenr(x))

#define pmd_set(pmdp, ptep)	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) == 0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page(pmd)		(__bpn_to_ba(pmd_val(pmd)))
#define pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)		(__bpn_to_ba(pgd_val(pgd)))

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
	((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in the ioremap page-table-directory */
#define pgd_offset_i(address)	(ioremap_pgd + pgd_index(address))

#define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))
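
/*
 * Putting the lookup macros together: a sketch (locking and error
 * handling elided; mm, addr and page are illustrative locals) of a
 * walk from an mm and a user address down to the linux pte:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	if (!pgd_none(*pgd)) {
 *		pmd_t *pmd = pmd_offset(pgd, addr);
 *		if (!pmd_none(*pmd)) {
 *			pte_t *pte = pte_offset(pmd, addr);
 *			if (pte_present(*pte))
 *				page = pte_page(*pte);
 *		}
 *	}
 *
 * Real callers also hold the mm's page_table_lock across the walk.
 */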

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

/* note: these must be macros so that they update the given pte in place */
#define pte_uncache(pte)	(pte_val(pte) |= _PAGE_NO_CACHE)
#define pte_cache(pte)		(pte_val(pte) &= ~_PAGE_NO_CACHE)

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/* Atomic PTE updates */

static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\n\
1:	ldarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n\
	stdcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc");
	return old;
}

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HPTEFLAGS, 0));
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_update(ptep, _PAGE_RW, 0);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
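
/*
 * pte_update() is the building block for all of the helpers above: the
 * ldarx/stdcx. pair retries until the clear/set takes effect without a
 * concurrent update to the pte having intervened, and the pre-update
 * value is returned.  A typical use, e.g. when aging a page:
 *
 *	int referenced = ptep_test_and_clear_young(ptep);
 *
 * atomically clears _PAGE_ACCESSED and reports whether it was set.
 */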

/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	pte_update(ptep, ~_PAGE_HPTEFLAGS, pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

static inline void pte_clear(pte_t *ptep)
{
	pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
}

struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);

#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_range		local_flush_tlb_range

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* PPC has hw page tables. */
}

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)

extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr, int len);
extern void flush_icache_range(unsigned long, unsigned long);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[1024];
extern pgd_t ioremap_dir[1024];

extern void paging_init(void);

/*
 * Page tables may have changed.  We don't need to do anything here
 * as entries are faulted into the hash table by the low-level
 * data/instruction access exception handlers.
 *
 * We won't be able to use update_mmu_cache to update the
 * hardware page table because we need to update the pte
 * as well, but we don't get the address of the pte, only
 * its value.
 */
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t *ptep);
extern void build_valid_hpte(unsigned long vsid, unsigned long ea,
			     unsigned long pa, pte_t *ptep,
			     unsigned hpteflags, unsigned bolted);

/* Encode and de-code a swap entry */
#define SWP_TYPE(entry)		(((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry)	((entry).val >> 8)
#define SWP_ENTRY(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
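
/*
 * Example round trip through the swap encoding above (the type and
 * offset values are purely illustrative):
 *
 *	e = SWP_ENTRY(5, 100):	e.val = (5 << 1) | (100 << 8)	= 0x640a
 *	swp_entry_to_pte(e):	0x640a << PTE_SHIFT		= 0x640a0000
 *	SWP_TYPE(e):		(0x640a >> 1) & 0x3f		= 5
 *	SWP_OFFSET(e):		0x640a >> 8			= 100
 *
 * Because the entry is shifted up by PTE_SHIFT, the low protection bits
 * (including _PAGE_PRESENT) of the resulting pte are all clear, which is
 * what distinguishes a swapped-out pte from a present translation.
 */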

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range	remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern void updateBoltedHptePP(unsigned long newpp, unsigned long ea);
extern void hpte_init_pSeries(void);
extern void hpte_init_iSeries(void);

extern void make_pte(HPTE *htab, unsigned long va, unsigned long pa,
		     int mode, unsigned long hash_mask, int large);

#endif /* __ASSEMBLY__ */
#endif /* _PPC64_PGTABLE_H */