#ifndef _ALPHA_PGALLOC_H
#define _ALPHA_PGALLOC_H

#include <linux/config.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern void __load_new_mm_context(struct mm_struct *);


/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_dcache_page(page)			do { } while (0)

/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are.  */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp, which is not
   used on Alpha.

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process.  */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end)		imb()
#else
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif


/*
 * Use a few helper functions to hide the ugly broken ASN
 * numbers on early Alphas (ev4 and ev45)
 */

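/* EV4/EV45: the ASNs are broken (see above), so flushing the current
   mm means reloading the mm context and then zapping the whole
   per-process TLB with tbiap.  */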
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}

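/* EV5 and later: ASNs work, so loading a new mm context (and with it
   a new ASN) is enough to retire the old translations.  */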
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}

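/* Flush an mm that is not currently active on this CPU: zeroing its
   per-CPU context forces a new ASN to be allocated the next time the
   mm is activated there.  */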
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	long *mmc = &mm->context[smp_processor_id()];
	/*
	 * Check it's not zero first to avoid cacheline ping pong when
	 * possible.
	 */
	if (*mmc)
		*mmc = 0;
}

/* We need to flush the userspace icache after setting breakpoints in
   ptrace.

   Instead of indiscriminately using imb, take advantage of the fact
   that icache entries are tagged with the ASN and load a new mm context.  */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */

#ifndef CONFIG_SMP
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			__load_new_mm_context(mm);
		else
			mm->context[smp_processor_id()] = 0;
	}
}
#else
extern void flush_icache_user_range(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);
#endif

/* this is used only in do_no_page and do_swap_page */
#define flush_icache_page(vma, page)	flush_icache_user_range((vma), (page), 0, 0)
/*
 * Flush just one page in the current TLB set.  We need to be very
 * careful about the icache here; there is no way to invalidate a
 * specific icache page.
 */

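/* EV4: flush the DTB entry (tbi type 2); for executable mappings,
   also reload the mm context and flush both TBs (tbi type 3), since
   the icache cannot be flushed one page at a time.  */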
__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}

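/* EV5: for executable mappings a new mm context (new ASN) covers
   both TBs and the icache; otherwise a tbisd (type 2) on the data
   TB is enough.  */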
__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}


#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/*
 * Flush current user mapping.
 */
static inline void flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}

/*
 * Flush a specified range of user page tables from the TLB.
 * Although Alpha uses VPTE caches, this can be a nop: Alpha has no
 * fine-grained TLB flushing, so the VPTE entries will be flushed
 * during the next flush_tlb_range.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
}

#ifndef CONFIG_SMP
/*
 * Flush everything (kernel mapping may also have
 * changed due to vmalloc/vfree)
 */
static inline void flush_tlb_all(void)
{
	tbia();
}

/*
 * Flush a specified user mapping
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

/*
 * Page-granular tlb flush.
 *
 * Do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb flush, because that potentially also does an
 * icache flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}

/*
 * Flush a specified range of user mapping:  on the
 * Alpha we flush the whole user tlb.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);

#endif /* CONFIG_SMP */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#ifndef CONFIG_SMP
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pmd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#else
#include <asm/smp.h>
#define quicklists cpu_data[smp_processor_id()]
#endif
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist (quicklists.pmd_cache)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)

#define pmd_populate(mm, pmd, pte)	pmd_set(pmd, pte)
#define pgd_populate(mm, pgd, pmd)	pgd_set(pgd, pmd)

extern pgd_t *get_pgd_slow(void);

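/* The quicklists are singly linked lists of free page-table pages:
   the first word of each cached page points to the next one.  The
   fast allocators pop a page off the list and clear the link word,
   falling back to the slow path when the list is empty.  */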
static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

static inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

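/* The slow-path allocators get a fresh page from the page allocator
   and zero it before handing it out.  */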
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (ret)
		clear_page(ret);
	return ret;
}

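/* pmd and pte pages are both single pages, so they share the
   pte_quicklist; only pgds get a quicklist of their own.  */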
static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}

static inline void pmd_free_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
}

static inline void pmd_free_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

static inline void pte_free_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

static inline void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		pte_free_fast(pte)
#define pmd_free(pmd)		pmd_free_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

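/* Trim the page-table quicklists back down when they grow too large.  */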
extern int do_check_pgt_cache(int, int);

#endif /* _ALPHA_PGALLOC_H */