/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vma->vm_flags != svma->vm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * Check for proper vm_flags and page table alignment.
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a shareable pmd page for hugetlb.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct prio_tree_iter iter;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;

	if (!vma_shareable(vma, addr))
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud,
			     (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	spin_unlock(&mapping->i_mmap_lock);
}
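
/*
 * Worked example of the sharing check above - a sketch only, assuming
 * x86_64's 4KB base pages and 1GB pud coverage; all addresses are made
 * up for illustration:
 *
 *	Two tasks map the same hugetlbfs file with vm_pgoff == 0,
 *	svma->vm_start == 0x40000000, and the fault in vma lands on file
 *	index idx == 512, so
 *
 *		saddr = ((512 - 0) << PAGE_SHIFT) + 0x40000000
 *		      = 0x200000 + 0x40000000 = 0x40200000
 *		sbase = saddr & PUD_MASK = 0x40000000
 *		s_end = sbase + PUD_SIZE = 0x80000000
 *
 *	If svma covers all of [sbase, s_end) with vm_flags identical to
 *	vma's and pmd_index(addr) == pmd_index(saddr), the check returns
 *	saddr and huge_pmd_share() can reuse svma's pmd page wholesale.
 */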

/*
 * Unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
 * (indicated by page_count > 1), unmap is achieved by clearing pud and
 * decrementing the ref count.  If count == 1, the pte page is not shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * returns: 1	successfully unmapped a shared pte page
 *	    0	the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	/*
	 * Skip the rest of the range covered by this pmd page: the
	 * caller's loop increment of HPAGE_SIZE resumes at its end.
	 */
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (pud_none(*pud))
			huge_pmd_share(mm, addr, pud);
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
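
/*
 * Illustrative note on the arithmetic in follow_huge_pmd() above.  The
 * concrete numbers assume 2MB huge pages over 4KB base pages and are
 * not taken from a real system:
 *
 *	address			= 0x40201000
 *	address & ~HPAGE_MASK	= 0x1000  (byte offset inside the huge page)
 *	0x1000 >> PAGE_SHIFT	= 1	  (second 4KB subpage)
 *
 * so the function returns the huge page's head struct page plus one,
 * i.e. the mem_map entry for the 4KB subpage that contains 'address'.
 */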

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}
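
/*
 * Example of the free_area_cache/cached_hole_size interplay in the
 * bottom-up search above; the numbers are assumptions chosen only to
 * illustrate the caching:
 *
 *	A search that just placed a 4MB mapping at 0x40000000 leaves
 *	free_area_cache == 0x40400000, so the next request resumes there
 *	instead of rescanning from TASK_UNMAPPED_BASE.  Meanwhile,
 *	cached_hole_size remembers the largest hole skipped so far; a
 *	later request with len <= cached_hole_size restarts at
 *	TASK_UNMAPPED_BASE, because a hole big enough for it is known to
 *	exist below the cache point.
 */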

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(addr, len, pgoff))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
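
/*
 * Userspace usage sketch (illustrative only: "/mnt/huge" is an assumed
 * hugetlbfs mount point and 0x400000 an assumed multiple of the huge
 * page size).  hugetlb_get_unmapped_area() above runs when such a file
 * is mmap()ed, here without an address hint:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 0x400000, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * Note that len must already be a multiple of the huge page size: the
 * (len & ~HPAGE_MASK) check rejects anything else with -EINVAL instead
 * of rounding up.
 */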