/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/fixmap.h>

/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
                                 unsigned long page);

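/*
 * On SMP the flush_tlb_* operations are provided out of line (and typically
 * broadcast the flush to the other CPUs); on UP they simply map to the
 * local_* variants above.
 */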
#ifdef CONFIG_SMP

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#else /* CONFIG_SMP */

#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_range(mm,vmaddr,end)	local_flush_tlb_range(mm, vmaddr, end)
#define flush_tlb_page(vma,page)	local_flush_tlb_page(vma, page)

#endif /* CONFIG_SMP */

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS.  */
}


/*
 * Allocate and free page tables.
 */

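/*
 * Freed page directories and pte pages are kept on small per-CPU
 * "quicklists" so they can be handed out again without going back to
 * the page allocator; pgtable_cache_size counts the cached pages.
 */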
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

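/* Make the pmd entry point at the given pte page. */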
#define pmd_populate(mm, pmd, pte)	pmd_set(pmd, pte)

/*
 * Initialize new page directory with pointers to invalid ptes
 */
extern void pgd_init(unsigned long page);

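/*
 * Slow path: allocate a fresh pgd from the page allocator, point its
 * user entries at the invalid pte table and copy the kernel mappings
 * from init_mm.
 */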
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		pgd_init((unsigned long)ret);
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

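/*
 * Fast path: pop a pgd off the per-CPU quicklist, falling back to
 * get_pgd_slow() when the list is empty.
 */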
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

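/*
 * free_pgd_fast() pushes the pgd back onto the quicklist;
 * free_pgd_slow() returns it to the page allocator.
 */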
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

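/*
 * The same quicklist scheme is used for pte pages; get_pte_fast()
 * returns NULL when the list is empty.
 */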
extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);

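/* Allocate and zero a fresh pte page from the page allocator. */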
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

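/*
 * Try the quicklist first; a NULL return tells the caller to fall back
 * to pte_alloc_one().
 */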
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void pte_free_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)           pte_free_fast(pte)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc(mm)           get_pgd_fast()

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

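/*
 * do_check_pgt_cache(low, high) trims the quicklists: when more than
 * `high' pages are cached it frees pages back down toward `low'.
 */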
extern int do_check_pgt_cache(int, int);

#endif /* _ASM_PGALLOC_H */