#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * We used to allocate fresh pgd's and pmd's, initialize their entries to
 * point at the NULL versions of the next-level page table, later completely
 * re-initialize them the same way, and then free them.  This wasted a lot of
 * work and caused unnecessary memory traffic.  We avoid it by caching freed
 * page-table pages on the per-CPU quicklists below.
 */
#define pgd_quicklist		(local_cpu_data->pgd_quick)
#define pmd_quicklist		(local_cpu_data->pmd_quick)
#define pte_quicklist		(local_cpu_data->pte_quick)
#define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)

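/*
 * Each quicklist is a simple singly-linked list of free page-table pages: the
 * first word of a cached page holds the address of the next cached page (or 0
 * at the end of the list), and pgtable_cache_size counts the pages currently
 * cached.  A minimal sketch of the pattern the helpers below follow ("head"
 * and "page" are illustrative names only, not part of this interface):
 *
 *	free:  *(unsigned long *) page = (unsigned long) head;  head = page;
 *	alloc: page = head;  head = (unsigned long *) *page;  page[0] = 0;
 */
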
static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
	unsigned long *ret = pgd_quicklist;

	if (__builtin_expect(ret != NULL, 1)) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}
	return (pgd_t *) ret;
}

static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{
	/* The VM system never calls pgd_alloc_one_fast(), so we do it here.  */
	pgd_t *pgd = pgd_alloc_one_fast(mm);

	if (__builtin_expect(pgd == NULL, 0)) {
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
		if (__builtin_expect(pgd != NULL, 1))
			clear_page(pgd);
	}
	return pgd;
}

static inline void
pgd_free (pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	++pgtable_cache_size;
}

static inline void
pgd_populate (struct mm_struct *mm, pgd_t *pgd_entry, pmd_t *pmd)
{
	pgd_val(*pgd_entry) = __pa(pmd);
}

static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = (unsigned long *)pmd_quicklist;

	if (__builtin_expect(ret != NULL, 1)) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}
	return (pmd_t *)ret;
}

static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (__builtin_expect(pmd != NULL, 1))
		clear_page(pmd);
	return pmd;
}

static inline void
pmd_free (pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	++pgtable_cache_size;
}

static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline pte_t*
pte_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = (unsigned long *)pte_quicklist;

	if (__builtin_expect(ret != NULL, 1)) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}
	return (pte_t *)ret;
}

static inline pte_t*
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);

	if (__builtin_expect(pte != NULL, 1))
		clear_page(pte);
	return pte;
}

static inline void
pte_free (pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	++pgtable_cache_size;
}

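/*
 * Trim the page-table quicklists.  Roughly: when pgtable_cache_size exceeds
 * the high watermark, cached pages are freed back to the page allocator until
 * the count drops to the low watermark.  (The implementation lives in the
 * ia64 mm code, not in this header.)
 */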
extern int do_check_pgt_cache (int, int);

/*
 * Now for some TLB flushing routines.  These can be very expensive, so try
 * to avoid them whenever possible.
 */

/*
 * Flush everything (the kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void __flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	__flush_tlb_all()
#endif

/*
 * Flush all TLB entries belonging to the given user address space (mm).
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (mm) {
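		/* Dropping the context forces a fresh one to be allocated before this mm is used again.  */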
		mm->context = 0;
		if (mm == current->active_mm) {
			/* This is called, e.g., as a result of exec().  */
			get_new_mmu_context(mm);
			reload_context(mm);
		}
	}
}

extern void flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end);

/*
 * Page-granular TLB flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma->vm_mm, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
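	/* ptc.l purges only this CPU's TLB, so it is usable only for the currently active mm.  */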
	if (vma->vm_mm == current->active_mm)
		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
	else
		vma->vm_mm->context = 0;
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to the address range [START, END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
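	/*
	 * ia64_thash(addr) returns the (virtual) address of the linear page-table
	 * entry that maps ADDR.  Those addresses are contiguous only within a
	 * single region, so for huge or region-crossing ranges we simply flush
	 * everything rather than try to construct a sensible thash() range.
	 */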
	if (unlikely(end - start >= 1024*1024*1024*1024UL
		     || rgn_index(start) != rgn_index(end - 1)))
		/*
		 * This condition is very rare and normal applications shouldn't get
		 * here.  No attempt has been made to optimize for this case.
		 */
		flush_tlb_all();
	else
		flush_tlb_range(mm, ia64_thash(start), ia64_thash(end));
}

/*
 * Cache flushing routines.  These can be very expensive, so try to avoid
 * them whenever possible.
 */

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)
#define flush_icache_page(vma,page)		do { } while (0)

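/*
 * The only cache-coherence issue handled by hand here is i-cache vs. d-cache
 * coherence for pages that end up being executed.  flush_dcache_page()
 * therefore doesn't flush anything itself; it merely clears PG_arch_1 to
 * record that the page's contents may have changed, and update_mmu_cache()
 * below performs the i-cache flush lazily when the page is next mapped
 * executable, setting PG_arch_1 again.
 */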
#define flush_dcache_page(page)			\
do {						\
	clear_bit(PG_arch_1, &(page)->flags);	\
} while (0)

extern void flush_icache_range (unsigned long start, unsigned long end);

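/*
 * Make the i-cache coherent for LEN bytes of PAGE, which is mapped at
 * USER_ADDR in user space; the flush is done through the page's kernel
 * mapping, since the user mapping may not be present on this CPU.  (Typically
 * used after instructions have been written into another process's page,
 * e.g. via ptrace.)
 */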
#define flush_icache_user_range(vma, page, user_addr, len)					\
do {												\
	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
	flush_icache_range(_addr, _addr + (len));						\
} while (0)

static inline void
clear_user_page (void *addr, unsigned long vaddr, struct page *page)
{
	clear_page(addr);
	flush_dcache_page(page);
}

static inline void
copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
{
	copy_page(to, from);
	flush_dcache_page(page);
}

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this function to take care of any (delayed) i-cache
 * flushing that may be necessary.
 */
static inline void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	/* Don't use VADDR: it may not be mapped on this CPU (or may have just been flushed).  */
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark the i-cache as coherent for this page */
}
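
/*
 * Illustrative sequence of the lazy i-cache flush protocol above (sketch only):
 *
 *	1. page contents change    -> flush_dcache_page() clears PG_arch_1
 *	2. page mapped executable  -> update_mmu_cache() finds PG_arch_1 clear,
 *	   flushes the i-cache for the page, and sets PG_arch_1
 *	3. page mapped again       -> update_mmu_cache() finds PG_arch_1 set and
 *	   returns without flushing
 */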

#endif /* _ASM_IA64_PGALLOC_H */