/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8-byte pointers to pmd tables.  Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8-byte pointers to page
 * tables.  Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8-byte ptes.  Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0.  When memory is
 * low and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher-layer code, so that
 * the failure is recognized later on.  Linux does not seem to handle
 * these failures very well, though.  The empty_bad_page_table contains
 * invalid pte entries to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.  The
 * layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order-1 pmd level but that
 * seemed rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level, but at
 * the moment there seems to be no need for it.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42
 * bits of virtual address space.
 */
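/*
 * A worked example of the arithmetic behind the figures above (purely
 * illustrative; the concrete numbers assume CONFIG_PAGE_SIZE_4KB, i.e.
 * PAGE_SHIFT == 12, with the ORDER values defined below):
 *
 *	PMD_SHIFT    = 12 + (12 + 0 - 3) = 21	(a pmd entry maps 2MB)
 *	PGDIR_SHIFT  = 21 + (12 + 0 - 3) = 30	(a pgd entry maps 1GB)
 *	PTRS_PER_PGD = (4096 << 1) / 8   = 1024	(order-1, i.e. 8kB, pgd)
 *
 * so the reachable address space is 1024 * 1GB = 2^40 bytes, the 40 bits
 * quoted above.  The same arithmetic with PAGE_SHIFT == 16 and a folded
 * pmd gives PGDIR_SHIFT = 16 + 16 + 0 - 3 = 29 and PTRS_PER_PGD = 8192,
 * hence the 2^42 bytes quoted for the 64kB case.
 */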
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#if PGDIR_SIZE >= TASK_SIZE
#define USER_PTRS_PER_PGD	(1)
#else
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
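/*
 * To illustrate the convention (a sketch only; pmdp is a hypothetical
 * pointer to a live pmd entry, not something defined in this header):
 *
 *	pmd_clear(pmdp);
 *	pmd_none(*pmdp);	yields 1 - entry points at invalid_pte_table
 *	pmd_present(*pmdp);	yields 0
 *
 * "none" and "present" are thus tests against the shared
 * invalid_pte_table rather than against a zero entry; an empty entry
 * still points at a real (all-invalid) page table, so a walker that
 * dereferences it takes a page fault instead of chasing a NULL pointer.
 */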
#ifndef __PAGETABLE_PMD_FOLDED

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte)		((void)(pte))
#define pte_unmap_nested(pte)	((void)(pte))
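/*
 * Putting the lookup helpers above together - a sketch only, assuming a
 * 3-level configuration.  pte_walk() is a hypothetical illustration, not
 * part of this header, and real callers must check pud_none()/pmd_none()
 * (and the _bad() variants) before descending a level:
 *
 *	static pte_t *pte_walk(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		return pte_offset_kernel(pmd, addr);
 *	}
 *
 * On 2-level configurations the folded levels collapse to no-ops via the
 * asm-generic pgtable-nopmd.h / pgtable-nopud.h headers included above.
 */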
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: the high 24 bits are the offset, the next 8 bits
 * the type, and the low 32 bits are zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 32) | (offset << 40);
	return pte;
}

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/*
 * Bits 0, 4, 6, and 7 are taken.  Let's leave bits 1, 2, 3, and 5 alone
 * to make things easier, and only use the upper 56 bits for the page
 * offset...
 */
#define PTE_FILE_MAX_BITS	56

#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })

#endif /* _ASM_PGTABLE_64_H */