/* $Id: pgalloc.h,v 1.1.1.1 2008/10/15 03:29:18 james26_jang Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>

/* Cache and TLB flush operations. */

/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
	do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_range(mm, start, end) \
	flush_cache_mm(mm)
#define flush_cache_page(vma, page) \
	flush_cache_mm((vma)->vm_mm)
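
/*
 * Note: flushw_user() spills the current thread's register windows out to
 * its user stack.  Since the D-cache is dealt with separately (see the
 * write-through note below), spilling the windows appears to be all a
 * "cache flush" of an mm needs to do on this port.
 */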

/* This is unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_page_to_ram(page)			do { } while (0)

/*
 * On spitfire, the icache doesn't snoop local stores and we don't
 * use block commit stores (which invalidate icache lines) during
 * module load, so we need this.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);

extern void __flush_dcache_page(void *addr, int flush_icache);
extern void __flush_icache_page(unsigned long);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif

extern void flush_dcache_page(struct page *page);

extern void __flush_dcache_range(unsigned long start, unsigned long end);

extern void __flush_cache_all(void);

extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
			      unsigned long r, unsigned long end,
			      unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
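
/*
 * Note on the argument named "r" above: judging from the wrappers below,
 * it selects which MMU context register the low-level flush goes through;
 * the non-SMP macros always pass SECONDARY_CONTEXT there.
 */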

#ifndef CONFIG_SMP

#define flush_cache_all()	__flush_cache_all()
#define flush_tlb_all()		__flush_tlb_all()

#define flush_tlb_mm(__mm) \
do { if(CTX_VALID((__mm)->context)) \
	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while(0)

#define flush_tlb_range(__mm, start, end) \
do { if(CTX_VALID((__mm)->context)) { \
	unsigned long __start = (start)&PAGE_MASK; \
	unsigned long __end = PAGE_ALIGN(end); \
	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
			  (__end - __start)); \
     } \
} while(0)

#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
     if(CTX_VALID(__mm->context)) \
	__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
			 SECONDARY_CONTEXT); \
} while(0)

#else /* CONFIG_SMP */

extern void smp_flush_cache_all(void);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);

#define flush_cache_all()	smp_flush_cache_all()
#define flush_tlb_all()		smp_flush_tlb_all()
#define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
#define flush_tlb_range(mm, start, end) \
	smp_flush_tlb_range(mm, start, end)
#define flush_tlb_page(vma, page) \
	smp_flush_tlb_page((vma)->vm_mm, page)

#endif /* ! CONFIG_SMP */

extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
					  unsigned long end)
{
	/* Note the signed type.  */
	long s = start, e = end, vpte_base;
	if (s > e)
		/* Nobody should call us with start below the VM hole and
		   end above it; BUG() so we find out if that ever happens.  */
		BUG();
	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);
	flush_tlb_range(mm,
			vpte_base + (s >> (PAGE_SHIFT - 3)),
			vpte_base + (e >> (PAGE_SHIFT - 3)));
}
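
/*
 * The shift by (PAGE_SHIFT - 3) above converts a user address into the
 * offset of its PTE within the virtual page table: address >> PAGE_SHIFT
 * gives the page number, and each PTE is 8 bytes, so the PTE sits at
 * vpte_base + ((address >> PAGE_SHIFT) << 3).  Flushing that range of the
 * VPTE area throws out TLB entries that cover the page tables themselves.
 */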

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache[2];
	unsigned int pgcache_size;
	unsigned int pgdcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
#define pgd_cache_size		(pgt_quicklists.pgdcache_size)
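
/*
 * The quicklists cache recently freed page-table pages so they can be
 * handed out again without a trip back to the page allocator.  On SMP the
 * lists live in the per-cpu cpu_data entry; otherwise a single global
 * pgt_quicklists is used.  There are two pte lists because freed pte/pmd
 * pages are kept sorted by D-cache color (see DCACHE_COLOR below).
 */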

#ifndef CONFIG_SMP

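/*
 * In the non-SMP case pgd allocations are carved out of pages in halves
 * (PAGE_SIZE/2 each), so one backing page holds two pgds.  The quicklist
 * is a list of struct pages chained through page->next_hash, and
 * page->pprev_hash is reused as a two-bit mask of which halves are free
 * (bit 0 = lower half, bit 1 = upper half).
 */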
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	if (!page->pprev_hash) {
		(unsigned long *)page->next_hash = pgd_quicklist;
		pgd_quicklist = (unsigned long *)page;
	}
	(unsigned long)page->pprev_hash |=
		(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
	pgd_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
        struct page *ret;

        if ((ret = (struct page *)pgd_quicklist) != NULL) {
                unsigned long mask = (unsigned long)ret->pprev_hash;
		unsigned long off = 0;

		if (mask & 1)
			mask &= ~1;
		else {
			off = PAGE_SIZE / 2;
			mask &= ~2;
		}
		(unsigned long)ret->pprev_hash = mask;
		if (!mask)
			pgd_quicklist = (unsigned long *)ret->next_hash;
                ret = (struct page *)(__page_address(ret) + off);
                pgd_cache_size--;
        } else {
		struct page *page = alloc_page(GFP_KERNEL);

		if (page) {
			ret = (struct page *)page_address(page);
			clear_page(ret);
			(unsigned long)page->pprev_hash = 2;
			(unsigned long *)page->next_hash = pgd_quicklist;
			pgd_quicklist = (unsigned long *)page;
			pgd_cache_size++;
		}
        }
        return (pgd_t *)ret;
}

#else /* CONFIG_SMP */

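/*
 * With CONFIG_SMP a pgd gets a whole page to itself and the per-cpu
 * quicklist is just a singly linked list threaded through the first
 * word of each free page.
 */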
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else {
		ret = (unsigned long *) __get_free_page(GFP_KERNEL);
		if(ret)
			memset(ret, 0, PAGE_SIZE);
	}
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#endif /* CONFIG_SMP */

#if (L1DCACHE_SIZE > PAGE_SIZE)			    /* is there a D$ aliasing problem? */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif
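
/*
 * When the L1 D-cache is larger than a page, two pages whose addresses
 * differ only in the low cache-index bit can alias in the cache.  The
 * color macros pick out that bit (DCACHE_COLOR from a kernel address,
 * VPTE_COLOR from a user address via its slot in the virtual page table)
 * so that pte/pmd pages are freed onto, and reallocated from, the
 * quicklist of the matching color.
 */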

#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)

extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE);
	return pmd;
}

extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;
	int color = 0;

	if (pte_quicklist[color] == NULL)
		color = 1;
	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pmd;
	pgtable_cache_size++;
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define pmd_populate(MM, PMD, PTE)	pmd_set(PMD, PTE)

extern pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address);

extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long color = VPTE_COLOR(address);
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
		pte_quicklist[color] = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
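
/*
 * pte_alloc_one_fast() only tries the quicklist whose color matches the
 * VPTE slot of the address (pmd_alloc_one_fast() is less picky and falls
 * back to the other color); a NULL return tells the caller to take the
 * slow pte_alloc_one() path and allocate a fresh page.
 */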

extern __inline__ void free_pte_fast(pte_t *pte)
{
	unsigned long color = DCACHE_COLOR((unsigned long)pte);
	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
	pte_quicklist[color] = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		free_pte_fast(pte)
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

extern int do_check_pgt_cache(int, int);
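
/*
 * do_check_pgt_cache() is the hook that trims these quicklists back when
 * they grow too large; the two int arguments are the low and high water
 * marks passed in by the generic code.
 */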

#endif /* _SPARC64_PGALLOC_H */