#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/config.h>
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
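
/*
 * Worked example (illustrative only, assuming the current 8KB pages,
 * i.e. PAGE_SHIFT == 13):
 *
 *	PMD_SHIFT   = 13 + 10 = 23, so PMD_SIZE   = 8MB
 *	PGDIR_SHIFT = 13 + 20 = 33, so PGDIR_SIZE = 8GB
 *
 * Each second-level table thus maps 8MB, and each top-level entry
 * covers 8GB of virtual address space.
 */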

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0
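
/*
 * Concretely (8KB pages, a sketch of the arithmetic only): each table
 * is one page of 8-byte entries, so all three levels have 1024 entries,
 * and the full tree can map 1024 * 8GB = 8TB (2^43 bytes) of virtual
 * address space.
 */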

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END		(-PGDIR_SIZE)
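
/*
 * For the usual (non-LARGE_VMALLOC) case with 8KB pages, these work
 * out to (illustrative values):
 *
 *	VMALLOC_START = -2*PGDIR_SIZE = 0xfffffffc00000000
 *	VMALLOC_END   =   -PGDIR_SIZE = 0xfffffffe00000000
 *
 * i.e. the second-highest 8GB top-level slot is reserved for vmalloc.
 */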

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100
#define _PAGE_URE	0x0200
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c); see the worked expansion below the
 * __S111 definition.
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
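
/*
 * Worked expansion (illustrative only): a private "-w-" request goes
 * through
 *
 *	__P010 = _PAGE_P(_PAGE_FOE)
 *	       = _PAGE_NORMAL(_PAGE_FOE | _PAGE_FOW)
 *	       = __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW)
 *
 * Reads are permitted (no _PAGE_FOR), so "-w-" behaves as "rw-", and
 * _PAGE_P forces _PAGE_FOW so the first write faults and triggers
 * copy-on-write.  The shared __S010 leaves FOW clear and is writable
 * immediately.
 */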

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2			3

/* to find an entry in a page-table */
#define PAGE_PTR(address)		\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define PHYS_TWIDDLE(phys) \
  ((((phys) & 0xc0000000000UL) == 0x40000000000UL) \
  ? ((phys) ^= 0xc0000000000UL) : (phys))
#else
#define PHYS_TWIDDLE(phys) (phys)
#endif
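
/*
 * Illustrative example (hypothetical address): a physaddr of
 * 0x40000001000 satisfies (phys & 0xc0000000000) == 0x40000000000,
 * so PHYS_TWIDDLE XORs in 0xc0000000000 and yields 0x80000001000,
 * flipping bits <43:42> from 01 to 10.  Anything outside that window
 * passes through unchanged.
 */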

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define PAGE_TO_PA(page)	(((page) - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
		((((page)-page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
		+ page_zone(page)->zone_start_paddr)
#endif

#ifndef CONFIG_DISCONTIGMEM
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = ((unsigned long)((page) - mem_map) << 32) |	\
		       pgprot_val(pgprot);				\
	pte;								\
})
#else
#define mk_pte(page, pgprot)							\
({										\
	pte_t pte;								\
	unsigned long pfn;							\
										\
	pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32;	\
	pfn += page_zone(page)->zone_start_paddr << (32-PAGE_SHIFT);		\
	pte_val(pte) = pfn | pgprot_val(pgprot);				\
										\
	pte;									\
})
#endif
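
/*
 * Sketch of the resulting bit layout (contiguous-memory case, values
 * illustrative): for the page with index 1 in mem_map,
 *
 *	pte_val = (1UL << 32) | pgprot_val(pgprot)
 *
 * so the page frame number sits in the upper 32 bits (_PFN_MASK) and
 * the protection bits in the lower 32.
 */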

extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpage) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x)	(mem_map+(unsigned long)((pte_val(x) >> 32)))
#else
#define pte_page(x)							\
({									\
	unsigned long kvirt;						\
	struct page * __xx;						\
									\
	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
	__xx = virt_to_page(kvirt);					\
									\
	__xx;								\
})
#endif

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
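
/*
 * Typical use of the modifiers above when a fault is resolved -- a
 * minimal sketch only, not code used by this header (ptep is assumed
 * to point at the faulting pte):
 */
#if 0
	pte_t pte = *ptep;
	pte = pte_mkdirty(pte_mkyoung(pte));	/* set __DIRTY_BITS and __ACCESS_BITS */
	set_pte(ptep, pte);			/* write back through the set_pte hook */
#endif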

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
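
/*
 * The three lookups above compose into a full software walk of the
 * tree.  A minimal sketch, under the assumption that the caller has
 * already checked pgd_none()/pmd_none() (example_pte_walk is a
 * hypothetical name, not part of this header):
 */
#if 0
static inline pte_t * example_pte_walk(struct mm_struct * mm, unsigned long address)
{
	pgd_t * pgd = pgd_offset(mm, address);	/* first level */
	pmd_t * pmd = pmd_offset(pgd, address);	/* second level */
	return pte_offset(pmd, address);	/* third level: the pte itself */
}
#endif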

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(x)			(((x).val >> 32) & 0xff)
#define SWP_OFFSET(x)			((x).val >> 40)
#define SWP_ENTRY(type, offset)		((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
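
/*
 * Worked example (hypothetical values): SWP_ENTRY(3, 0x1234) builds
 *
 *	val = (3UL << 32) | (0x1234UL << 40)
 *
 * from which SWP_TYPE() recovers (val >> 32) & 0xff == 3 and
 * SWP_OFFSET() recovers val >> 40 == 0x1234.  The low 32 bits stay
 * zero, so a swap pte is never _PAGE_VALID.
 */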

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

#define io_remap_page_range(start, busaddr, size, prot) \
    remap_page_range(start, virt_to_phys(__ioremap(busaddr, size)), size, prot)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */