/*
 * linux/include/asm-xtensa/pgtable.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

/*
 * We only use two ring levels, user and kernel space.
 */

#define USER_RING	1	/* user ring level */
#define KERNEL_RING	0	/* kernel ring level */

/*
 * The Xtensa architecture port of Linux has a two-level page table system,
 * i.e. the logical three-level Linux page table layout is folded.
 * Each task has the following memory page tables:
 *
 *   PGD table (page directory), ie. 3rd-level page table:
 *	One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
 *	(Architectures that don't have the PMD folded point to the PMD tables)
 *
 *	The pointer to the PGD table for a given task can be retrieved from
 *	the task structure (struct task_struct*) t, e.g. current():
 *	  (t->mm ? t->mm : t->active_mm)->pgd
 *
 *   PMD tables (page middle-directory), ie. 2nd-level page tables:
 *	Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
 *
 *   PTE tables (page table entry), ie. 1st-level page tables:
 *	One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
 *	invalid_pte_table for absent mappings.
 *
 * The individual pages are 4 kB big with special pages for the
 * empty_zero_page.
 */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use a two-level layout, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE		1024
#define PTRS_PER_PTE_SHIFT	10
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		1024
#define PGD_ORDER		0
#define PMD_ORDER		0
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
#define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)

/*
 * Virtual memory area. We keep a distance to other memory regions to be
 * on the safe side. We also use this area for cache aliasing.
 */

#define VMALLOC_START	0xC0000000
#define VMALLOC_END	0xC7FF0000

/* Xtensa Linux config PTE layout (when present):
 *	31-12:	PPN
 *	11-6:	Software
 *	5-4:	RING
 *	3-0:	CA
 *
 * Similar to the Alpha and MIPS ports, we need to keep track of the ref
 * and mod bits in software. We have a software "you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page. On the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page.
 *
 * See further below for the PTE layout for swapped-out pages.
 */
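/*
 * Illustrative sketch (not part of the original header): given the bit
 * layout above, the fields of a present PTE could be picked apart as
 * follows, assuming PAGE_SHIFT == 12. The local names ppn, ring and ca
 * are hypothetical and only serve to show where each field lives.
 *
 *	unsigned long val  = pte_val(pte);
 *	unsigned long ppn  = val >> PAGE_SHIFT;	// physical page number, bits 31-12
 *	unsigned long ring = (val >> 4) & 3;	// protection ring, bits 5-4
 *	unsigned long ca   = val & 0xf;		// cache attribute, bits 3-0
 */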
#define _PAGE_VALID	(1<<0)	/* hardware: page is accessible */
#define _PAGE_WRENABLE	(1<<1)	/* hardware: page is writable */

/* None of these cache modes include MP coherency: */
#define _PAGE_NO_CACHE	(0<<2)	/* bypass, non-speculative */
#if XCHAL_DCACHE_IS_WRITEBACK
# define _PAGE_WRITEBACK (1<<2)	/* write back */
# define _PAGE_WRITETHRU (2<<2)	/* write through */
#else
# define _PAGE_WRITEBACK (1<<2)	/* assume write through */
# define _PAGE_WRITETHRU (1<<2)
#endif
#define _PAGE_NOALLOC	(3<<2)	/* don't allocate cache, if not cached */
#define _CACHE_MASK	(3<<2)

#define _PAGE_USER	(1<<4)	/* user access (ring=1) */
#define _PAGE_KERNEL	(0<<4)	/* kernel access (ring=0) */

/* Software */
#define _PAGE_RW	(1<<6)	/* software: page writable */
#define _PAGE_DIRTY	(1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED	(1<<8)	/* software: page accessed (read) */
#define _PAGE_FILE	(1<<9)	/* nonlinear file mapping */

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY)
#define _PAGE_PRESENT	(_PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED)

#ifdef CONFIG_MMU

# define PAGE_NONE	__pgprot(_PAGE_PRESENT)
# define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
# define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
# define PAGE_INVALID	__pgprot(_PAGE_USER)

# if (DCACHE_WAY_SIZE > PAGE_SIZE)
#  define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL)
# else
#  define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL)
# endif

#else /* no mmu */

# define PAGE_NONE	__pgprot(0)
# define PAGE_SHARED	__pgprot(0)
# define PAGE_COPY	__pgprot(0)
# define PAGE_READONLY	__pgprot(0)
# define PAGE_KERNEL	__pgprot(0)

#endif
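/*
 * Illustrative sketch (not part of the original header): expanding the
 * macros above, a freshly installed PAGE_SHARED mapping carries only the
 * software write bit:
 *
 *	PAGE_SHARED == __pgprot(_PAGE_VALID | _PAGE_WRITEBACK |
 *				_PAGE_ACCESSED | _PAGE_USER | _PAGE_RW)
 *
 * The hardware write bit (_PAGE_WRENABLE) is presumably granted later,
 * once the write has been noted in software, which is what lets the mod
 * state be tracked; pte_wrprotect() below clears both bits together.
 */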
/*
 * On certain configurations of Xtensa MMUs (eg. the initial Linux config),
 * the MMU can't do page protection for execute, and considers that the
 * same as read. Also, write permissions may imply read permissions.
 * What follows is the closest we can get by reasonable means.
 * See linux/mm/mmap.c for the protection_map[] array that uses these
 * definitions.
 */
#define __P000	PAGE_NONE	/* private --- */
#define __P001	PAGE_READONLY	/* private --r */
#define __P010	PAGE_COPY	/* private -w- */
#define __P011	PAGE_COPY	/* private -wr */
#define __P100	PAGE_READONLY	/* private x-- */
#define __P101	PAGE_READONLY	/* private x-r */
#define __P110	PAGE_COPY	/* private xw- */
#define __P111	PAGE_COPY	/* private xwr */

#define __S000	PAGE_NONE	/* shared --- */
#define __S001	PAGE_READONLY	/* shared --r */
#define __S010	PAGE_SHARED	/* shared -w- */
#define __S011	PAGE_SHARED	/* shared -wr */
#define __S100	PAGE_READONLY	/* shared x-- */
#define __S101	PAGE_READONLY	/* shared x-r */
#define __S110	PAGE_SHARED	/* shared xw- */
#define __S111	PAGE_SHARED	/* shared xwr */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];

/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * The following only work if pte_present() is true.
 */
#define pte_none(pte)	 (!(pte_val(pte) ^ _PAGE_USER))
#define pte_present(pte) (pte_val(pte) & _PAGE_VALID)
#define pte_clear(mm,addr,ptep) \
	do { update_pte(ptep, __pte(_PAGE_USER)); } while(0)

#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)

/* Note: we use the _PAGE_USER bit to indicate write-protected kernel memory */

static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; }
static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
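/*
 * Illustrative sketch (not part of the original header): generic mm code
 * typically chains the helpers above when it touches an entry, e.g. to
 * mark a page accessed and dirty after a write, and then publishes the
 * new value with update_pte() (defined below) so the store reaches memory
 * even on aliasing write-back caches:
 *
 *	pte_t pte = *ptep;
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	update_pte(ptep, pte);
 */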
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep));
#endif
}

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
#endif
}

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	update_pte(ptep, pte_mkold(pte));
	return 1;
}

static inline int
ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	update_pte(ptep, pte_mkclean(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}

/* To find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* To find an entry in a page-table-directory */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,address)	((pmd_t*)(dir))
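/*
 * Illustrative sketch (not part of the original header): a minimal
 * two-level walk from an mm and a virtual address down to the PTE, using
 * the helpers above together with pte_offset_kernel() defined just below.
 * Locking is the caller's responsibility and is omitted here.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// no-op cast: the PMD is folded
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *ptep = pte_offset_kernel(pmd, addr);
 *		pte_t pte = *ptep;
 *		...
 *	}
 */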
/* Find an entry in the third-level page table.. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

/*
 * Encode and decode a swap entry (see the PTE layout note further above).
 */
#define __swp_type(entry)	(((entry).val >> 7) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 13)
#define __swp_entry(type,offs)	((swp_entry_t) {((type) << 7) | ((offs) << 13)})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

#endif /* !defined (__ASSEMBLY__) */

#ifdef __ASSEMBLY__

/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long),
 * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long).
 *
 * Note: we require an additional temporary register, which can be the same
 * as the register that holds the address.
 *
 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
 */
#define _PGD_INDEX(rt,rs)	extui	rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui	rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
					_PGD_INDEX(tmp, adr);		\
					addx4	mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr);		\
					srli	pmd, pmd, PAGE_SHIFT;	\
					slli	pmd, pmd, PAGE_SHIFT;	\
					addx4	pmd, tmp, pmd

#else

extern void paging_init(void);

#define kern_addr_valid(addr)	(1)

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

/*
 * Remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'.
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

/* No page table caches to initialize */
#define pgtable_cache_init()	do { } while (0)

typedef pte_t *pte_addr_t;

#endif /* !defined (__ASSEMBLY__) */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */