/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = next_node(nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}
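
/*
 * Allocate a huge page for @vma.  A shared mapping consumes one of the
 * pages set aside for it at reservation time (resv_huge_pages); a
 * private mapping may only take a page that is not needed to back an
 * outstanding reservation.
 */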
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */
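
/*
 * Back the HugePages_* fields of /proc/meminfo and the per-node
 * hugepage counts reported through sysfs.
 */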
int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}


int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
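
/*
 * Tear down the huge-page mappings in [start, end).  Unmapped pages are
 * only gathered on page_list here; their references are dropped (and the
 * pages potentially freed back to the pool) after flush_tlb_range(), so
 * that no page is freed while another CPU may still hold a stale TLB
 * entry for it.
 */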
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}
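
/*
 * Handle a fault on a hole: look the page up in the page cache, or
 * allocate and zero a fresh huge page if none exists yet.  Shared
 * mappings insert the new page into the page cache; a private page
 * stays anonymous and is merely locked.
 */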
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
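
/*
 * The hugetlb side of get_user_pages(): walk the huge ptes, faulting
 * pages in as needed, and hand back the constituent small-page struct
 * pages so that callers need not be hugepage-aware.
 */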
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
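
/*
 * region_chg() reports how many pages a reservation of [f, t) adds
 * beyond what is already recorded (inserting a zero-size placeholder
 * when the range is disjoint, so that a later region_add() cannot fail
 * for lack of memory); region_add() then commits the reservation.
 * For example, with [0, 2) and [5, 7) already recorded,
 * region_chg(head, 1, 6) returns 3 (pages 2, 3 and 4), and
 * region_add(head, 1, 6) merges everything into a single [0, 7).
 */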
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpusets are configured, they break the strict hugetlb page
	 * reservation, because the accounting is done on global variables:
	 * the reservation is never checked against the page availability of
	 * the current cpuset, so an application can still be OOM'ed by the
	 * kernel if the cpuset it runs in lacks free hugetlb pages.
	 * Enforcing strict accounting per cpuset is all but impossible (or
	 * too ugly), because cpusets are fluid: tasks and memory nodes can
	 * be moved between them at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable.  However, to preserve some of the old semantics,
	 * we fall back to checking the request against the current free
	 * page count, as a best effort that hopefully minimizes the impact
	 * of the semantics cpusets do change.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}