#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
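
/*
 * Worked example, assuming the usual 4K-page configuration with 32-bit
 * PTEs (PAGE_SHIFT = 12, PTE_SHIFT = 10):
 *
 *	PGDIR_SHIFT  = 12 + 10 = 22, so a pgdir entry maps 4MB
 *	PTRS_PER_PTE = 1 << 10 = 1024 PTEs per one-page PTE table
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024 entries in a one-page pgdir
 *
 * With 64-bit PTEs (PTE_SHIFT = 9) the same formulas give 512 PTEs per
 * page, PGDIR_SHIFT = 21 and a 2048-entry 8KB pgdir, matching the
 * >32-bit physical address layout described above.
 */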

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * With HIGHMEM this is the bottom of the PKMAP area; on other
 * configurations it is an arbitrary value (for now).  From here we can
 * start laying out the kernel virtual space that goes below PKMAP and
 * FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
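
/*
 * For instance, on a non-coherent, non-HIGHMEM platform with a
 * (hypothetical) CONFIG_CONSISTENT_SIZE of 2MB this gives
 *
 *	IOREMAP_TOP = 0xfe000000 - 0x200000 = 0xfde00000
 *
 * reserving the 2MB directly below KVIRT_TOP for the consistent
 * (uncached DMA) mapping.
 */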

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap(), which grow down
 * from ioremap_base, and the VM area allocations, which grow upwards
 * from VMALLOC_START.  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
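
/*
 * Worked example with a hypothetical high_memory of 0x30000000 (768MB
 * of lowmem) and no PPC_PIN_SIZE:
 *
 *	VMALLOC_START = (0x30000000 + 0x1000000) & ~0xffffff = 0x31000000
 *
 * i.e. vmalloc space starts one VMALLOC_OFFSET above the end of lowmem,
 * rounded to a 16MB boundary, and extends up to ioremap_bot.
 */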

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
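
/*
 * Usage sketch: clear and/or set PTE flag bits in a single update, e.g.
 *
 *	pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0);	-- write-protect
 *	pte_update(ptep, 0, _PAGE_DIRTY);		-- set a status bit
 *
 * which is exactly how ptep_set_wrprotect() and __ptep_set_access_flags()
 * below are built on top of it.
 */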

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
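
/*
 * A kernel-address lookup chains these helpers together, roughly
 * (sketch, no error checking):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pud_offset()/pmd_offset() come from the folded
 * <asm-generic/pgtable-nopmd.h> layer included above, which passes the
 * pgd entry straight through on this two-level layout.
 */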

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
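
/*
 * Resulting swap PTE layout (sketch, 32-bit PTE case):
 *
 *	bits 0-2 : always zero (the << 3 keeps the low bits clear, out
 *		   of the way of the flag bits named above)
 *	bits 3-7 : swap type (5 bits, up to 32 swap areas)
 *	bits 8-31: swap offset (24 bits)
 *
 * e.g. __swp_entry(1, 100) gives val = 1 | (100 << 5) = 0xc81, stored
 * in the PTE as 0xc81 << 3 = 0x6408.
 */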

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */