/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001 by Ralf Baechle at alii
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/config.h>
#include <asm/addrspace.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <asm/cachectl.h>
#include <asm/io.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(vma, vmaddr) flushes a single page
 *  - flush_cache_range(mm, start, end) flushes a range of pages
 *  - flush_page_to_ram(page) writes back a kernel page to RAM
 */
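/*
 * Illustrative sketch (not part of this interface): code that writes
 * instructions into memory is expected to make the icache coherent with
 * the dcache before executing them, roughly:
 *
 *	memcpy(dst, code, len);
 *	flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
 */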
extern void (*_flush_cache_all)(void);
extern void (*___flush_cache_all)(void);
extern void (*_flush_cache_mm)(struct mm_struct *mm);
extern void (*_flush_cache_range)(struct mm_struct *mm, unsigned long start,
	unsigned long end);
extern void (*_flush_cache_page)(struct vm_area_struct *vma,
	unsigned long page);
extern void (*_flush_page_to_ram)(struct page *page);
extern void (*_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*_flush_icache_page)(struct vm_area_struct *vma,
	struct page *page);
extern void (*_flush_cache_sigtramp)(unsigned long addr);
extern void (*_flush_icache_all)(void);

/* These suck ...  */
extern void (*_flush_cache_l2)(void);
extern void (*_flush_cache_l1)(void);

#define flush_cache_all()		_flush_cache_all()
#define __flush_cache_all()		___flush_cache_all()
#define flush_dcache_page(page)		do { } while (0)

#ifdef CONFIG_CPU_R10000
/*
 * Since the r10k handles VCEs in hardware, most of the cache flushing
 * routines are not needed.  Only the icache of a processor is not
 * coherent with the dcache of the _same_ processor, so we must flush
 * the icache so that it does not contain stale contents of physical
 * memory.  No flushes are needed for DMA coherency, since the O200s
 * are I/O coherent.  The only place where we might be optimizing away
 * a needed icache flush is mprotect (when PROT_EXEC is added).
 */
extern void andes_flush_icache_page(unsigned long);
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm,start,end)	do { } while (0)
#define flush_cache_page(vma,page)	do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)
#define flush_icache_range(start, end)	_flush_cache_l1()
#define flush_icache_user_range(vma, page, addr, len) \
	flush_icache_page((vma), (page))
#define flush_icache_page(vma, page)					\
do {									\
	if ((vma)->vm_flags & VM_EXEC)					\
		andes_flush_icache_page(phys_to_virt(page_to_phys(page))); \
} while (0)

#else

#define flush_cache_mm(mm)		_flush_cache_mm(mm)
#define flush_cache_range(mm,start,end)	_flush_cache_range(mm,start,end)
#define flush_cache_page(vma,page)	_flush_cache_page(vma, page)
#define flush_page_to_ram(page)		_flush_page_to_ram(page)
#define flush_icache_range(start, end)	_flush_icache_range(start, end)
#define flush_icache_user_range(vma, page, addr, len) \
	flush_icache_page((vma), (page))
#define flush_icache_page(vma, page)	_flush_icache_page(vma, page)

#endif /* !CONFIG_CPU_R10000 */

#define flush_cache_sigtramp(addr)	_flush_cache_sigtramp(addr)
#ifdef CONFIG_VTAG_ICACHE
#define flush_icache_all()		_flush_icache_all()
#else
#define flush_icache_all()		do { } while (0)
#endif

#define flush_cache_l2()		_flush_cache_l2()
#define flush_cache_l1()		_flush_cache_l1()

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables.  Each pmd table is a
 * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8 byte pointers to
 * page tables.  Each page table is a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes.  Each pgd entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0.  When memory is
 * low and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * or empty_bad_page_table is returned to the higher-level code, so
 * that the failure is recognized later on.  Linux does not seem to
 * handle these failures very well, though.  The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Vmalloc handling: vmalloc uses swapper_pg_dir[0] (returned by
 * pgd_offset_k), which is initialized to point to kpmdtbl.  kpmdtbl is
 * the only single-page pmd in the system.  kpmdtbl entries point into the
 * kptbl[] array.  We reserve 1 << PGD_ORDER pages to hold the
 * vmalloc range translations, which the fault handler looks at.
 */
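/*
 * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): the layout
 * above covers 1024 pgd entries * 1024 pmd entries * 512 ptes * 4K per
 * page = 2^10 * 2^10 * 2^9 * 2^12 = 2^41 bytes of virtual space per
 * address space.
 */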

#endif /* !__ASSEMBLY__ */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + 1 - 3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
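/*
 * For illustration, with 4K pages (PAGE_SHIFT == 12) these work out to
 * PMD_SHIFT == 21, so one pmd entry maps PMD_SIZE == 2MB, and
 * PGDIR_SHIFT == 31, so one pgd entry maps PGDIR_SIZE == 2GB.  Other
 * page sizes scale these accordingly.
 */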

/*
 * Entries per page directory level.  As described above this is a full
 * three-level layout: pgd and pmd tables span two pages each, pte tables
 * a single page.
 */
#define PTRS_PER_PGD	1024
#define PTRS_PER_PMD	1024
#define PTRS_PER_PTE	512
#define PGD_ORDER		1

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define VMALLOC_START		XKSEG
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END	\
	(VMALLOC_START + ((1 << PGD_ORDER) * PTRS_PER_PTE * PAGE_SIZE))
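/*
 * With 4K pages this reserves (1 << PGD_ORDER) * PTRS_PER_PTE * PAGE_SIZE
 * == 2 * 512 * 4K == 4MB of vmalloc address space, matching the kptbl[]
 * reservation described in the comment above.
 */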

#include <asm/pgtable-bits.h>

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * MIPS can't do page protection for execute, and considers that the same
 * as read.  Also, write permission implies read permission.  This is the
 * closest we can get by reasonable means.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
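/*
 * Example: a private PROT_READ|PROT_WRITE mapping uses __P011 == PAGE_COPY,
 * so the page is mapped without _PAGE_WRITE and the first write faults,
 * letting the fault handler do copy-on-write; the shared equivalent,
 * __S011 == PAGE_SHARED, maps the page writable right away.
 */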

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
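/*
 * Descriptive note: on CPUs with virtually indexed caches the zero page
 * is replicated once per cache colour by the mm init code and
 * zero_page_mask picks the copy whose colour matches vaddr, avoiding
 * aliasing; otherwise the mask is 0 and a single page is used.
 */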

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline unsigned long pmd_page(pmd_t pmd)
{
	return pmd_val(pmd);
}

static inline unsigned long pgd_page(pgd_t pgd)
{
	return pgd_val(pgd);
}

static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	pgd_val(*pgdp) = (((unsigned long) pmdp) & PAGE_MASK);
}

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}


/*
 * Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it had better already be global).
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}

static inline void pte_clear(pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte(ptep, __pte(0));
}
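/*
 * Why the buddy handling above: a MIPS TLB entry maps an even/odd pair of
 * virtual pages and is only treated as global when both halves have the G
 * bit set, so set_pte()/pte_clear() keep _PAGE_GLOBAL consistent across
 * the pair to avoid losing a global mapping when its buddy is updated.
 */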

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return pmd_val(pmd) & ~PAGE_MASK;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

/*
 * Empty pgd entries point to the invalid_pmd_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long) invalid_pmd_table;
}

static inline int pgd_bad(pgd_t pgd)
{
	return pgd_val(pgd) & ~PAGE_MASK;
}

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long) invalid_pmd_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
}

#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x)		(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
#define mips64_pte_pagenr(x) \
	(PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
	PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
#define pte_page(x)		(mem_map+mips64_pte_pagenr(x))
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
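/*
 * Note on the bit pairs used above: _PAGE_READ/_PAGE_WRITE and
 * _PAGE_ACCESSED/_PAGE_MODIFIED are software-managed, while
 * _PAGE_SILENT_READ/_PAGE_SILENT_WRITE are the hardware valid and dirty
 * bits seen by the TLB.  The mk* helpers only set a hardware bit once both
 * the permission and the matching software status bit are present, so the
 * first access or write still faults and gets accounted.
 */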

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
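/*
 * Sketch of one common 2.4-era use, mapping device memory uncached from a
 * driver's mmap method (argument names are illustrative only):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_page_range(vma->vm_start, phys, size, vma->vm_page_prot);
 */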

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define PAGE_TO_PA(page)	(((page) - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
		((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
		  + (page_zone(page)->zone_start_paddr))
#endif
#define mk_pte(page, pgprot)						\
({									\
	pte_t	__pte;							\
									\
	pte_val(__pte) = ((unsigned long)(PAGE_TO_PA(page))) |		\
						pgprot_val(pgprot);	\
									\
	__pte;								\
})

static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pte(physpage | pgprot_val(pgprot));
}
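/*
 * Illustration: the pte stores the physical address itself (not a shifted
 * pfn) OR'ed with the protection bits, so e.g. mk_pte_phys(0x08000000,
 * PAGE_KERNEL) yields a pte value of 0x08000000 plus the PAGE_KERNEL bits,
 * and pte_page() recovers the frame via pte_val() >> PAGE_SHIFT.
 */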

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define page_pte(page) page_pte_prot(page, __pgprot(0))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, 0)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) +
	       ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) (pmd_page(*dir)) +
	       ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
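/*
 * Putting the three lookups together (sketch): the pte for address `addr'
 * in `mm' is reached with
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset(pmd, addr);
 *
 * real callers must check pgd_none()/pmd_none() along the way.
 */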

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

extern void (*_update_mmu_cache)(struct vm_area_struct *vma,
	unsigned long address, pte_t pte);
#define update_mmu_cache(vma, address, pte) _update_mmu_cache(vma, address, pte)

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 32) | (offset << 40);
	return pte;
}

#define SWP_TYPE(x)		(((x).val >> 32) & 0xff)
#define SWP_OFFSET(x)		((x).val >> 40)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })
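/*
 * Worked example: SWP_ENTRY(3, 0x12) builds the value
 * (3UL << 32) | (0x12UL << 40) == 0x0000120300000000, from which
 * SWP_TYPE() recovers 3 and SWP_OFFSET() recovers 0x12.
 */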

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#define io_remap_page_range remap_page_range

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */