#ifndef __ASM_SH64_PGTABLE_H
#define __ASM_SH64_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgtable.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>

struct vm_area_struct;

extern void paging_init(void);

/* We provide our own get_unmapped_area to avoid cache synonym issues */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * Basically we have the same two-level (which is the logical three level
 * Linux page table layout folded) page tables as the i386.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

#define NPHYS		32
#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)

/* Typically 2-level is sufficient up to 32 bits of virtual address space;
   beyond that, 3-level would be appropriate. */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* top level: PGD. */
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

/* middle level: PMD. This doesn't do anything for the 2-level case. */
#define PTRS_PER_PMD	(1)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PMD_SHIFT	PGDIR_SHIFT
#define PMD_SIZE	PGDIR_SIZE
#define PMD_MASK	PGDIR_MASK

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
/*
 * three-level asymmetric paging structure: PGD is top level.
 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
 */
/* bottom level: PTE. It's 9 bits = 512 pointers */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* middle level: PMD. It's 10 bits = 1024 pointers */
#define PTRS_PER_PMD	((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
#define PMD_MAGNITUDE	2	/* sizeof(unsigned long long *) magnitude */
#define PMD_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PMD_BITS	(PAGE_SHIFT - PMD_MAGNITUDE)

/* top level: PGD. It's 1 bit = 2 pointers */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#else
#error "No defined number of page table levels"
#endif
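/*
 * Worked example of the resulting virtual address split, assuming 4 KB
 * pages (PAGE_SHIFT == 12) and NEFF == 32; the numbers below just expand
 * the macros above and are not additional definitions.
 *
 * 2-level: PTE_BITS = 12 - 3 = 9, so PTRS_PER_PTE = 512 and
 *   PGDIR_SHIFT = 21; PGD_BITS = 32 - 21 = 11, so PTRS_PER_PGD = 2048
 *   and each PGD entry maps PGDIR_SIZE = 2 MB.
 *
 * 3-level: PTE_BITS = 9 as above, PMD_BITS = 12 - 2 = 10, so
 *   PTRS_PER_PMD = 1024 and PGDIR_SHIFT = 21 + 10 = 31; PGD_BITS = 1,
 *   so the top level holds just PTRS_PER_PGD = 2 pointers, each
 *   covering PGDIR_SIZE = 2 GB.
 */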
/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}
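/*
 * For illustration: with NPHYS == 32, NPHYS_SIGN is bit 31 and NPHYS_MASK
 * is 0xffffffff00000000. A PTE value of 0x80042107 is therefore stored by
 * set_pte() as 0xffffffff80042107, while 0x40042107 is stored unchanged.
 * (The example values are arbitrary, not taken from real use.)
 */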
/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/*
 * PGD level access routines.
 *
 * Note 1:
 * There's no need to use physical addresses since the tree walk is
 * performed entirely in software, up until the PTE translation.
 *
 * Note 2:
 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
 * clear (_PGD_EMPTY), or present. When present, the lower 3 nibbles
 * contain _KERNPG_TABLE, and since the entry is a kernel virtual
 * pointer, bit 31 must also be 1. Taking an arbitrary clear value of
 * bit 31 set to 0 and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY),
 * any other value is a bad pgd that must be reported via printk().
 */
#define _PGD_EMPTY	0x0

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline int pgd_none(pgd_t pgd)	{ return 0; }
static inline int pgd_bad(pgd_t pgd)	{ return 0; }
#define pgd_present(pgd)	((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
#define pgd_clear(xx)		do { } while(0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define pgd_present(pgd_entry)	(1)
#define pgd_none(pgd_entry)	(pgd_val((pgd_entry)) == _PGD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now. */
#define pgd_bad(pgd_entry)	(0)
#define pgd_clear(pgd_entry_p)	(set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))

#endif


#define pgd_page_vaddr(pgd_entry)	((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))
#define pgd_page(pgd)			(virt_to_page(pgd_val(pgd)))


/*
 * PMD defines. Middle level.
 */

/* PGD to PMD dereferencing */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define __pmd_offset(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, addr) \
		((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
#endif

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY	0x0
/* The PMD is either empty or present; it is never paged out. */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
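/*
 * For illustration, the software walk these macros support looks like
 * this (a sketch only; pgd_none()/pmd_none() error checking omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);	// or pgd_offset_k(address)
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset_kernel(pmd, address);
 *	pte_t entry = *pte;
 */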
/* Round it up ! */
#define USER_PTRS_PER_PGD	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
#define VMALLOC_END	0xff000000
#define VMALLOC_START	0xf0000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by SWP_ENTRY and SWP_TYPE below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no
				  write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Mask which drops the software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
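/*
 * As a check on the mask above: the bits it clears are
 * _PAGE_PRESENT | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_ACCESSED
 * = 0x004 | 0x020 | 0x400 | 0x800 = 0xc24, and ~0xc24 over the low 12
 * bits is 0x3db, i.e. exactly the software-only flags are dropped on
 * each TLB refill.
 */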
/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
				 _PAGE_SHARED)
/* We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack. */
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)


/*
 * In ST50 we have full permissions (Read/Write/Execute/Shared).
 * Just match 'em all. These are for mmap(), therefore all at least
 * User/Cachable/Present/Accessed. No point in making Fault on Write.
 */
#define __MMAP_COMMON	(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
	 /* sxwr */
#define __P000	__pgprot(__MMAP_COMMON)
#define __P001	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P010	__pgprot(__MMAP_COMMON)
#define __P011	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P100	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P101	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
#define __P110	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P111	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)

#define __S000	__pgprot(__MMAP_COMMON | _PAGE_SHARED)
#define __S001	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
#define __S010	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
#define __S011	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
#define __S100	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
#define __S101	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
#define __S110	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
#define __S111	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x)	__pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note 1:
 * It's the tree walk leaf. This is the physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:

   We must choose a bit pattern that cannot be valid, whether or not the page
   is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
   left for us to select.  If we force bit[7]==0 when swapped out, we could use
   the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
   we force bit[7]==1 when swapped out, we can use all zeroes to indicate
   empty.  This is convenient, because the page tables get cleared to zero
   when they are allocated.

 */
#define _PTE_EMPTY	0x0
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)		(pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to. Get the absolute physical
 * address, make a relative physical address and translate it to an
 * index.
 */
#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
				 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)		(mem_map + pte_pagenr(x))
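/*
 * For illustration with made-up numbers (these are not real platform
 * values): if __MEMORY_START were 0x80000000 and a present PTE held the
 * physical address 0x80042000, pte_pagenr() would yield
 * (0x80042000 - 0x80000000) >> 12 = 0x42 for 4 KB pages, and pte_page()
 * would return &mem_map[0x42].
 */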
/*
 * Return number of (down rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))


/*
 * The following only work if pte_present() is true.
 */
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_READ; }
static inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_EXECUTE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }

static inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
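/*
 * Typical usage sketch (not a definition from this file): these helpers
 * operate on a pte_t by value, so the result must be written back, e.g.
 * in a fault handler one might do
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkyoung(entry);
 *	if (writing)
 *		entry = pte_mkdirty(pte_mkwrite(entry));
 *	set_pte_at(mm, address, ptep, entry);
 *
 * where ptep/mm/address/writing stand for the handler's own state.
 */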
/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

typedef pte_t *pte_addr_t;

extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);

/* Encode and decode a swap entry. The type field is split across
   bits [1:0] and [6:3] to keep clear of _PAGE_PRESENT at bit [2];
   the offset lives in bits [31:8], as discussed above. */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte))
#define pgoff_to_pte(off)	((pte_t) { (off) | _PAGE_FILE })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#define pte_pfn(x)		(((unsigned long)((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH64_PGTABLE_H */