1/*
2 * BK Id: SCCS/s.page.h 1.8 08/19/01 20:06:47 paulus
3 */
4#ifndef _PPC_PAGE_H
5#define _PPC_PAGE_H
6
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4096 bytes per page */
#define PAGE_MASK	(~(PAGE_SIZE-1))	/* clears the offset-within-page bits */
11
12#ifdef __KERNEL__
13#include <linux/config.h>
14
/* Be sure to change arch/ppc/Makefile to match */
/* Virtual address at which the kernel image is mapped; physical address 0
 * corresponds to this virtual address (see ___pa/___va below). */
#define PAGE_OFFSET	0xc0000000
#define KERNELBASE	PAGE_OFFSET
18
19#ifndef __ASSEMBLY__
20#include <asm/system.h> /* for xmon definition */
21
#ifdef CONFIG_XMON
/* With the xmon debugger configured, BUG() reports the location and then
 * drops into the monitor so state can be inspected interactively. */
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	xmon(0); \
} while (0)
#else
/* Without xmon, BUG() reports the location and executes the all-zero
 * word, which is an illegal instruction and raises a trap. */
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	__asm__ __volatile__(".long 0x0"); \
} while (0)
#endif
/* The page argument is ignored; this just funnels into BUG(). */
#define PAGE_BUG(page) do { BUG(); } while (0)
34
/* When defined, wrap each page-table value in a one-member struct so the
 * compiler rejects accidental mixing of pte/pmd/pgd/pgprot values. */
#define STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

/* Accessors: extract the raw word from a wrapped value. */
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

/* Constructors: wrap a raw word in the corresponding struct type. */
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

/* With typechecking off, the accessors and constructors are identity. */
#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
76
77
/* Align addr up to the next multiple of size (size must be a power of
 * two) -- Cort.  Every macro argument is fully parenthesized so that
 * expression arguments (e.g. "1 << shift") expand correctly; the
 * original left "size" bare, which mis-expands for such arguments. */
#define _ALIGN(addr,size)	(((addr)+(size)-1)&(~((size)-1)))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
83
/* Page clear/copy primitives, implemented elsewhere (arch assembly). */
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);
/* The user-page variants also receive the user virtual address the page
 * will be mapped at -- presumably for cache handling on some CPUs;
 * NOTE(review): confirm against the implementations. */
extern void clear_user_page(void *page, unsigned long vaddr);
extern void copy_user_page(void *to, void *from, unsigned long vaddr);
88
/* map phys->virtual and virtual->phys for RAM pages */
/*
 * ___pa(): kernel virtual -> physical.  Adds the high 16 bits of
 * -PAGE_OFFSET to v with a single addis; the instruction's address is
 * recorded in the ".vtop_fixup" section so it can be found (and
 * presumably patched) by code elsewhere -- NOTE(review): the fixup
 * consumer is outside this file.
 */
static inline unsigned long ___pa(unsigned long v)
{
	unsigned long p;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".vtop_fixup\",\"aw\";"
		      ".align  1;"
		      ".long   1b;"
		      ".previous;"
		      : "=r" (p)
		      : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));

	return p;
}
/*
 * ___va(): physical -> kernel virtual.  Mirror of ___pa(): adds the high
 * 16 bits of PAGE_OFFSET to p with addis, recording the instruction's
 * address in the ".ptov_fixup" section for the (external) fixup
 * machinery -- NOTE(review): consumer lives outside this file.
 */
static inline void* ___va(unsigned long p)
{
	unsigned long v;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".ptov_fixup\",\"aw\";"
		      ".align  1;"
		      ".long   1b;"
		      ".previous;"
		      : "=r" (v)
		      : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));

	return (void*) v;
}
#define __pa(x) ___pa ((unsigned long)(x))
#define __va(x) ___va ((unsigned long)(x))

#define MAP_PAGE_RESERVED	(1<<15)
/* kaddr is fully parenthesized: the original cast bound only to the
 * first operand, so virt_to_page(p + 1) converted p before the pointer
 * arithmetic -- wrong for any non-char pointer expression. */
#define virt_to_page(kaddr)	(mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
/* page parenthesized for the same macro-hygiene reason. */
#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)
122
123extern unsigned long get_zero_page_fast(void);
124
125/* Pure 2^n version of get_order */
126extern __inline__ int get_order(unsigned long size)
127{
128	int order;
129
130	size = (size-1) >> (PAGE_SHIFT-1);
131	order = -1;
132	do {
133		size >>= 1;
134		order++;
135	} while (size);
136	return order;
137}
138
139#endif /* __ASSEMBLY__ */
140
/* Default vm_flags for data mappings: read/write/exec plus the
 * corresponding MAY* permission bits. */
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
143
144#endif /* __KERNEL__ */
145#endif /* _PPC_PAGE_H */
146