/*
 * BK Id: %F% %I% %G% %U% %#%
 */
#ifdef __KERNEL__
#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#include <linux/config.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#if defined(CONFIG_4xx)
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#elif defined(CONFIG_8xx)
#define __tlbia()	asm volatile ("tlbia" : : )

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long vmaddr)
	{ __tlbia(); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
	{ __tlbia(); }
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#else	/* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here; there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.  -- paulus
 * Also, on SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)

extern void flush_icache_user_range(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);
extern void flush_icache_range(unsigned long, unsigned long);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
#endif /* __ASSEMBLY__ */

/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The PowerPC MPC8xx uses a TLB with hardware-assisted, software tablewalk.
 * We also use the two-level tables, but we can put the real bits needed
 * for the TLB and tablewalk in them.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB does not have accessed or write
 * protect bits.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 */

/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture. The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control. In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB. These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 */

/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
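
/*
 * Editor's sketch (not in the original header): with the usual 4kB
 * pages (PAGE_SHIFT == 12) and PGDIR_SHIFT == 22, a 32-bit virtual
 * address splits up as
 *
 *	 31          22 21           12 11            0
 *	+--------------+---------------+---------------+
 *	|  pgd index   |   pte index   |  byte offset  |
 *	+--------------+---------------+---------------+
 *
 * For example, for va == 0xC0123456:
 *
 *	pgd index = va >> PGDIR_SHIFT          == 0x300
 *	pte index = (va >> PAGE_SHIFT) & 0x3ff == 0x123
 *	offset    = va & ~PAGE_MASK            == 0x456
 *
 * The pmd level is folded away (PTRS_PER_PMD == 1), so a pgd entry
 * points directly at a page of 1024 ptes.
 */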

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which grow down
 * from ioremap_base) and the VM area allocations (which grow upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	ioremap_bot

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_4xx)
/* Definitions for 4xx embedded chips. */
#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define	_PAGE_COHERENT	0x002	/* M: enforce memory coherence */
#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_EXEC	0x020	/* software: i-cache coherency required */
#define	_PAGE_PRESENT	0x040	/* software: PTE contains a translation */
#define _PAGE_DIRTY	0x100	/* C: page changed */
#define	_PAGE_RW	0x200	/* Writes permitted */
#define _PAGE_ACCESSED	0x400	/* R: page referenced */

#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */

/* These five software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_EXEC	0x0008	/* software: i-cache coherency required */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_WRITETHRU 0x0020	/* software: use writethrough cache */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */

#define _PAGE_HWWRITE	0x0100	/* h/w write enable: never set in Linux PTE */
#define _PAGE_DIRTY	0x0200	/* software: page changed */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other is USER&~RW */

#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
#define _PAGE_USER	0x004	/* usermode access allowed */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
#define _PAGE_RW	0x400	/* software: user write access allowed */
#endif

/* The non-standard PowerPC MMUs, which include the 4xx and 8xx (and
 * maybe the 603e), have TLB miss handlers that unconditionally set the
 * _PAGE_ACCESSED flag as a performance optimization.  This causes
 * problems for the pte_none() macro, just like the HASHPTE flag does
 * for the standard PowerPC MMUs.  Depending upon the MMU configuration,
 * either HASHPTE or ACCESSED will have to be masked to give us a
 * proper pte_none() condition.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#define _PTE_NONE_MASK _PAGE_ACCESSED
#else
#define _PTE_NONE_MASK _PAGE_HASHPTE
#endif
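
/*
 * Editor's note (illustrative, not from the original): _PTE_NONE_MASK
 * hides the bit that the MMU/hash code may leave behind in an otherwise
 * empty pte, so that pte_none() still works.  For instance, with the
 * classic hash MMU a pte whose value is exactly _PAGE_HASHPTE (a stale
 * HPTE still to be flushed) is treated as none, and on 4xx/8xx the same
 * holds for a value of exactly _PAGE_ACCESSED left by the TLB miss
 * handlers.  Any other bit keeps pte_none() false.
 */
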
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
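
/*
 * Editor's note (usage sketch, not from the original header): the
 * generic mm code gathers the __Pxxx/__Sxxx entries into its
 * protection_map[] array, indexed by the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED bits of vma->vm_flags.  A private PROT_READ|PROT_WRITE
 * mapping, for instance, picks __P011, i.e. PAGE_COPY_X here, which
 * is why a copy-on-write variant exists for every combination above.
 */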

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(ptep)		do { set_pte((ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		((pmd_val(pmd) & ~PAGE_MASK) != 0)
#define	pmd_present(pmd)	((pmd_val(pmd) & PAGE_MASK) != 0)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)

#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

/* These modify the pte in place, so they are macros rather than
 * by-value inline functions (which would only change a local copy). */
#define pte_uncache(pte)	(pte_val(pte) |= _PAGE_NO_CACHE)
#define pte_cache(pte)		(pte_val(pte) &= ~_PAGE_NO_CACHE)

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page,pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = ((page - mem_map) << PAGE_SHIFT) | pgprot_val(pgprot); \
	pte;								\
})
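
/*
 * Editor's sketch (illustrative only, not from the original header):
 * a typical use of the conversion helpers above, e.g. when setting up
 * an uncached I/O mapping.  'ptep' (a pte slot found by a page-table
 * walk) and 'pa' (a page-aligned physical address) are hypothetical:
 *
 *	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_IO)));
 *
 * mk_pte() builds the same kind of pte from a struct page pointer,
 * using (page - mem_map) << PAGE_SHIFT as the physical address.
 */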

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n\
	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
	return old;
}
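
/*
 * Editor's sketch (illustrative only): pte_update() is the building
 * block for the ptep_* helpers below.  Atomically clearing the dirty
 * bits and learning whether the page was dirty looks like this
 * ('ptep' is a hypothetical pte pointer):
 *
 *	unsigned long old = pte_update(ptep, _PAGE_DIRTY | _PAGE_HWWRITE, 0);
 *	int was_dirty = (old & _PAGE_DIRTY) != 0;
 *
 * which is exactly what ptep_test_and_clear_dirty() below does.
 */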

/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
#if _PAGE_HASHPTE != 0
	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
#else
	*ptep = pte;
#endif
}

static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page(pmd)	(pmd_val(pmd))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
static inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
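
/*
 * Editor's sketch (illustrative only): finding the Linux pte for a
 * kernel virtual address with the helpers above ('addr' is a
 * hypothetical unsigned long):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = NULL;
 *
 *	if (!pmd_none(*pmd))
 *		pte = pte_offset(pmd, addr);
 *
 * For a user address, start with pgd_offset(mm, addr) on the task's
 * mm_struct instead of pgd_offset_k().
 */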

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_page is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used).  -- paulus
 */
#define SWP_TYPE(entry)			((entry).val & 0x3f)
#define SWP_OFFSET(entry)		((entry).val >> 6)
#define SWP_ENTRY(type, offset)		((swp_entry_t) { (type) | ((offset) << 6) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 2 })
#define swp_entry_to_pte(x)		((pte_t) { (x).val << 2 })
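
/*
 * Editor's note (worked example, not from the original header): the
 * swap type ends up in bits 2-7 of the pte and the offset above that,
 * so the low two pte bits (which hold _PAGE_PRESENT and _PAGE_HASHPTE
 * on the classic hash MMU) stay clear.  For instance:
 *
 *	swp_entry_t e = SWP_ENTRY(3, 0x1234);	-- .val == 0x48d03
 *	pte_t pte = swp_entry_to_pte(e);	-- pte_val == 0x12340c
 *
 * and SWP_TYPE(e) == 3, SWP_OFFSET(e) == 0x1234, pte_present(pte) == 0.
 */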

/* CONFIG_APUS (Amiga PowerUp System) support */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define	IOMAP_FULL_CACHING	0
#define	IOMAP_NOCACHE_SER	1
#define	IOMAP_NOCACHE_NONSER	2
#define	IOMAP_NO_COPYBACK	3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
                                 unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */