#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#include <asm/pgtable.h>
#include <asm/cache.h>

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

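/* flush_page_to_ram() is a no-op on this port; cache coherency is handled
 * by the explicit flush routines below. */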
static inline void
flush_page_to_ram(struct page *page)
{
}

extern void flush_cache_all_local(void);

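/* On SMP, ask every other CPU to run flush_cache_all_local() and then
 * flush the local caches as well; on UP the local flush is all we need. */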
#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
	smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
	flush_cache_all_local();
}
#else
#define flush_cache_all flush_cache_all_local
#endif

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

/* The following value needs to be tuned and probably scaled with the
 * cache size.
 */

#define FLUSH_THRESHOLD 0x80000

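/* For user ranges below FLUSH_THRESHOLD only the range itself is flushed;
 * for larger ranges the UP path falls back to flushing the whole cache, on
 * the assumption that a full flush is cheaper at that point.  The SMP path
 * always flushes just the range. */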
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_dcache_range_asm(start, end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
#endif
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_icache_range_asm(start, end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
#endif
}

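/* Flush a user address range only when the mm owns the space currently
 * loaded in space register 3; otherwise fall back to a full cache flush. */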
static inline void
flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int sr3;

	if (!mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

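/* Same idea for a single page: flush the data cache for the page, and the
 * instruction cache only if the mapping is executable. */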
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(vmaddr, vmaddr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range(vmaddr, vmaddr + PAGE_SIZE);
	} else {
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
	}
}

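/* If the page has a mapping but no user mappings yet (neither i_mmap nor
 * i_mmap_shared), defer the flush by marking the page PG_dcache_dirty;
 * otherwise flush the kernel mapping immediately. */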
static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !page->mapping->i_mmap &&
			!page->mapping->i_mmap_shared) {
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		flush_kernel_dcache_page(page_address(page));
	}
}

#define flush_icache_page(vma,page) \
	do { \
		flush_kernel_dcache_page(page_address(page)); \
		flush_kernel_icache_page(page_address(page)); \
	} while (0)

#define flush_icache_range(s,e) \
	do { \
		flush_kernel_dcache_range_asm(s,e); \
		flush_kernel_icache_range_asm(s,e); \
	} while (0)

/* TLB flushing routines.... */

extern void flush_tlb_all(void);

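/* Load a new address-space context: the space id goes into space register 3
 * and a value derived from it (shifted according to SPACEID_SHIFT) goes
 * into control register 8. */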
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
#if SPACEID_SHIFT == 0
	mtctl(context << 1, 8);
#else
	mtctl(context >> (SPACEID_SHIFT - 1), 8);
#endif
}

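/* On SMP the whole TLB is purged.  On UP, instead of purging individual
 * entries, the mm is simply given a fresh space id: translations entered
 * under the old sid can never be used again. */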
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == &init_mm) BUG(); /* Should never happen */

#ifdef CONFIG_SMP
	flush_tlb_all();
#else
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

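/* Nothing needs to be done when page-table pages themselves are unmapped. */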
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mtsp(vma->vm_mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
}

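/* Purging a large range entry by entry costs more than purging the whole
 * TLB, so ranges of 512 pages or more just call flush_tlb_all().  Smaller
 * ranges are purged page by page, hitting both the data and the
 * instruction TLB when the TLB is split. */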
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)
		flush_tlb_all();
	else {
		mtsp(mm->context, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
	}
}

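/* There is no quicklist of cached pgds, so the "fast" allocator always
 * fails and pgd_alloc() falls through to __get_free_page(). */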
static inline pgd_t *pgd_alloc_one_fast (void)
{
	return NULL; /* not implemented */
}

static inline pgd_t *pgd_alloc (struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast();
	if (!pgd) {
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
		if (pgd)
			clear_page(pgd);
	}
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#ifdef __LP64__

/* Three Level Page Table Support for pmd's */

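/* Point the pgd entry at the physical address of the pmd table and mark
 * it with _PAGE_TABLE. */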
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL; /* no pmd quicklist; always take the slow path */
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
	if (pmd)
		clear_page(pmd);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

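/* Point the pmd entry at the physical address of the pte page and mark
 * it with _PAGE_TABLE. */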
static inline void pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = _PAGE_TABLE + __pa((unsigned long)pte);
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL; /* no pte quicklist; always take the slow path */
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

static inline void pte_free(pte_t *pte)
{
	free_page((unsigned long)pte);
}

extern int do_check_pgt_cache(int, int);

#endif