/*
 * BK Id: SCCS/s.pgalloc.h 1.9 05/17/01 18:14:25 cort
 */
#ifdef __KERNEL__
#ifndef _PPC_PGALLOC_H
#define _PPC_PGALLOC_H

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>

/*
 * This is handled very differently on the PPC since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 *
 * Note that the SMP/UP versions are the same, but we don't need a
 * per-cpu list of zero pages, because we do the zeroing with the cache
 * off and the access routines are lock-free.  The pgt cache stuff,
 * however, is per-cpu, since it isn't done with any lock-free access
 * routines (although I think we need arch-specific routines so I can
 * do lock-free).
 *
 * I need to generalize this so we can use it for other archs as well.
 * -- Cort
 */
#ifdef CONFIG_SMP
#define quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#endif

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
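
/*
 * A minimal illustrative sketch (not part of the original header): each
 * quicklist is a singly-linked list threaded through the free pages
 * themselves -- word 0 of every cached page holds the address of the
 * next page, so a push or pop is just a couple of word stores.  "list"
 * below is a hypothetical stand-in for pgd_quicklist or pte_quicklist.
 */
#if 0
	unsigned long *page;

	if ((page = list) != NULL) {		/* pop the head page */
		list = (unsigned long *)*page;	/* follow the link in word 0 */
		page[0] = 0;			/* scrub the link before handing it out */
		pgtable_cache_size--;
	}
#endif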

extern unsigned long *zero_cache;    /* head of linked list of pre-zero'd pages */
extern atomic_t zero_sz;	     /* # of currently pre-zero'd pages */
extern atomic_t zeropage_hits;	     /* # of zero'd page requests we've satisfied */
extern atomic_t zeropage_calls;      /* # of zero'd page requests that have been made */
extern atomic_t zerototal;	     /* # of pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/* Return a pre-zero'd page from the list; return NULL if none available. -- Cort */
extern unsigned long get_zero_page_fast(void);
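
/*
 * Minimal usage sketch (an assumption about the caller, not quoted from
 * the source): try the pre-zero'd list first and only pay for clearing
 * a page when the list is empty.
 */
#if 0
	unsigned long page;

	if (!(page = get_zero_page_fast())) {		/* cache miss */
		page = __get_free_page(GFP_KERNEL);	/* fall back to the page allocator */
		if (page)
			clear_page((void *)page);	/* and zero it by hand */
	}
#endif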

extern void __bad_pte(pmd_t *pmd);

extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret;

	if ((ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL)
		clear_page(ret);
	return ret;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long **)pgd = pgd_quicklist;	/* link via the page's first word */
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
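
/*
 * Minimal usage sketch, assuming the usual 2.4 mm conventions (not
 * copied from this port): allocation and teardown pair these macros,
 * so pgd pages cycle through the per-cpu quicklist instead of going
 * back to the page allocator every time.
 */
#if 0
	pgd_t *pgd = pgd_alloc(mm);	/* pop from the quicklist, else clear a fresh page */
	if (pgd) {
		/* ... install and use the page tables ... */
		pgd_free(pgd);		/* push the page back onto the quicklist */
	}
#endif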

/*
 * We don't have any real pmds, and this code never triggers because
 * the pgd will always be present.
 */
#define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
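
/*
 * Sketch of why the pmd level can be stubbed out: the PPC port folds
 * Linux's three-level page tables down to two, so the "pmd" is just the
 * pgd entry viewed through a different type.  The pmd_offset() shown
 * here is an assumption about the companion <asm/pgtable.h>, included
 * for illustration; it is not part of this file.
 */
#if 0
extern inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;	/* the pgd entry doubles as the pmd */
}
#endif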

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done)
		pte = (pte_t *) __get_free_page(GFP_KERNEL);
	else	/* early boot, before the normal page allocator is up */
		pte = (pte_t *) early_get_page();
	if (pte != NULL)
		clear_page(pte);
	return pte;
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
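
/*
 * Minimal sketch of the intended fast/slow pairing (assumed from the
 * generic 2.4 pte_alloc pattern, not quoted from it): try the quicklist
 * first, and fall back to a freshly zeroed page on a miss.
 */
#if 0
	pte_t *pte = pte_alloc_one_fast(mm, address);
	if (!pte)
		pte = pte_alloc_one(mm, address);	/* slow path: new zeroed page */
#endif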

extern __inline__ void pte_free_fast(pte_t *pte)
{
	*(unsigned long **)pte = pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		pte_free_slow(pte)

#define pmd_populate(mm, pmd, pte)	(pmd_val(*(pmd)) = (unsigned long) (pte))
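
/*
 * Hypothetical flow (illustration only, not from the source): once a
 * pte page exists, pmd_populate() just stores its address in the pmd
 * entry -- on this port no extra flag bits are folded in.
 */
#if 0
	pte_t *new = pte_alloc_one(mm, address);
	if (new)
		pmd_populate(mm, pmd, new);	/* pmd entry now points at the pte page */
#endif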

extern int do_check_pgt_cache(int, int);
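
/*
 * Sketch of how the trimmer is conventionally driven in 2.4 (assumed,
 * not quoted from this port): the idle loop frees cached pages back to
 * the system when the cache exceeds a high watermark, stopping at the
 * low one.  "low" and "high" are hypothetical watermark values.
 */
#if 0
	if (pgtable_cache_size > high)
		do_check_pgt_cache(low, high);
#endif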

#endif /* _PPC_PGALLOC_H */
#endif /* __KERNEL__ */