/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001 by Ralf Baechle at alii
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>

/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
	unsigned long end);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
	unsigned long page);

#ifdef CONFIG_SMP

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#else /* CONFIG_SMP */

#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_range(mm,vmaddr,end)	local_flush_tlb_range(mm, vmaddr, end)
#define flush_tlb_page(vma,page)	local_flush_tlb_page(vma, page)

#endif /* CONFIG_SMP */

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS. */
}


/*
 * Allocate and free page tables.  The *_fast allocators recycle pages
 * from the per-CPU quicklists below; the *_slow helpers go through the
 * page allocator.
 */

#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

#define pmd_populate(mm, pmd, pte)	pmd_set(pmd, pte)
#define pgd_populate(mm, pgd, pmd)	pgd_set(pgd, pmd)

extern pgd_t *get_pgd_slow(void);

static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
		return (pgd_t *)ret;
	}

	ret = (unsigned long *) get_pgd_slow();
	return (pgd_t *)ret;
}

static inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, 1);
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

static inline pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
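		/*
		 * ret[0] held the quicklist link while this page sat on the
		 * free list; copying ret[1] over it restores the first entry,
		 * since an empty page table has all of its entries identical.
		 */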
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

static inline void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

static inline void free_pte_slow(pte_t *pte)
{
	free_pages((unsigned long)pte, 0);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 1);
	if (pmd)
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
	return pmd;
}

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pmd_quicklist) != NULL) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}

extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_preadjusted);

static inline pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pmd_quicklist) != NULL) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
		return (pmd_t *)ret;
	}

	return (pmd_t *)ret;
}

static inline void free_pmd_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
}

static inline void free_pmd_slow(pmd_t *pmd)
{
	free_pages((unsigned long)pmd, 1);
}

#define pte_free(pte)		free_pte_fast(pte)
#define pmd_free(pte)		free_pmd_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

extern pte_t kptbl[(PAGE_SIZE << PGD_ORDER)/sizeof(pte_t)];
extern pmd_t kpmdtbl[PTRS_PER_PMD];

extern int do_check_pgt_cache(int, int);

#endif /* _ASM_PGALLOC_H */
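
/*
 * do_check_pgt_cache() itself is implemented in the mm code, not in this
 * header.  The block below is only an illustrative sketch (hence #if 0)
 * of how a 2.4-era port typically trims the quicklists declared above
 * back to the (low, high) watermarks; the real implementation may differ.
 */
#if 0
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	/* Only start trimming once the cache exceeds the high watermark. */
	if (pgtable_cache_size > high) {
		do {
			/* Pop one page from each quicklist and hand it back
			 * to the page allocator via the *_slow helpers. */
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
			if (pmd_quicklist)
				free_pmd_slow(get_pmd_fast()), freed++;
			if (pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
#endif /* illustrative sketch only */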