/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/blkdev.h> /* for blk_size */
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/shm.h>

#include <asm/pgtable.h>

spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
unsigned int nr_swapfiles;
int total_swap_pages;
static int swap_overflow;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

struct swap_list_t swap_list = {-1, -1};

struct swap_info_struct swap_info[MAX_SWAPFILES];

#define SWAPFILE_CLUSTER 256

static inline int scan_swap_map(struct swap_info_struct *si)
{
	unsigned long offset;
	/*
	 * We try to cluster swap pages by allocating them
	 * sequentially in swap.  Once we've allocated
	 * SWAPFILE_CLUSTER pages this way, however, we resort to
	 * first-free allocation, starting a new cluster.  This
	 * prevents us from scattering swap pages all over the entire
	 * swap partition, so that we reduce overall disk seek times
	 * between swap pages.  -- sct
	 */
	if (si->cluster_nr) {
		while (si->cluster_next <= si->highest_bit) {
			offset = si->cluster_next++;
			if (si->swap_map[offset])
				continue;
			si->cluster_nr--;
			goto got_page;
		}
	}
	si->cluster_nr = SWAPFILE_CLUSTER;

	/* try to find an empty (even not aligned) cluster. */
	offset = si->lowest_bit;
 check_next_cluster:
	if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
	{
		int nr;
		for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
			if (si->swap_map[nr])
			{
				offset = nr+1;
				goto check_next_cluster;
			}
		/* We found a completely empty cluster, so start
		 * using it.
		 */
		goto got_page;
	}
	/* No luck, so now go fine-grained as usual. -Andrea */
	for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
		if (si->swap_map[offset])
			continue;
		si->lowest_bit = offset+1;
	got_page:
		if (offset == si->lowest_bit)
			si->lowest_bit++;
		if (offset == si->highest_bit)
			si->highest_bit--;
		if (si->lowest_bit > si->highest_bit) {
			si->lowest_bit = si->max;
			si->highest_bit = 0;
		}
		si->swap_map[offset] = 1;
		nr_swap_pages--;
		si->cluster_next = offset+1;
		return offset;
	}
	si->lowest_bit = si->max;
	si->highest_bit = 0;
	return 0;
}
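
/*
 * Illustration of the clustering policy above (added commentary, not
 * part of the original source): on a freshly enabled swap area the
 * allocator hands out consecutive offsets; after SWAPFILE_CLUSTER
 * (256) sequential allocations it searches for the next fully empty
 * 256-slot run to start a new cluster, and only when no such run
 * exists does it fall back to plain first-free scanning between
 * lowest_bit and highest_bit.  This keeps swap writeout and readback
 * mostly sequential on disk.
 */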

swp_entry_t get_swap_page(void)
{
	struct swap_info_struct * p;
	unsigned long offset;
	swp_entry_t entry;
	int type, wrapped = 0;

	entry.val = 0;	/* Out of memory */
	swap_list_lock();
	type = swap_list.next;
	if (type < 0)
		goto out;
	if (nr_swap_pages <= 0)
		goto out;

	while (1) {
		p = &swap_info[type];
		if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
			swap_device_lock(p);
			offset = scan_swap_map(p);
			swap_device_unlock(p);
			if (offset) {
				entry = SWP_ENTRY(type,offset);
				type = swap_info[type].next;
				if (type < 0 ||
					p->prio != swap_info[type].prio) {
						swap_list.next = swap_list.head;
				} else {
					swap_list.next = type;
				}
				goto out;
			}
		}
		type = p->next;
		if (!wrapped) {
			if (type < 0 || p->prio != swap_info[type].prio) {
				type = swap_list.head;
				wrapped = 1;
			}
		} else
			if (type < 0)
				goto out;	/* out of swap space */
	}
out:
	swap_list_unlock();
	return entry;
}
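
/*
 * Added note: a swp_entry_t packs the swap area index ("type") and the
 * page offset within that area into a single word, with the exact bit
 * layout defined per-architecture in <asm/pgtable.h>.  The lookup
 * helpers below simply invert SWP_ENTRY():
 *
 *	swp_entry_t e = SWP_ENTRY(type, offset);
 *	SWP_TYPE(e) == type and SWP_OFFSET(e) == offset
 */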

static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = & swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = SWP_OFFSET(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	swap_list_lock();
	if (p->prio > swap_info[swap_list.next].prio)
		swap_list.next = type;
	swap_device_lock(p);
	return p;

bad_free:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static void swap_info_put(struct swap_info_struct * p)
{
	swap_device_unlock(p);
	swap_list_unlock();
}

static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
	int count = p->swap_map[offset];

	if (count < SWAP_MAP_MAX) {
		count--;
		p->swap_map[offset] = count;
		if (!count) {
			if (offset < p->lowest_bit)
				p->lowest_bit = offset;
			if (offset > p->highest_bit)
				p->highest_bit = offset;
			nr_swap_pages++;
		}
	}
	return count;
}

/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct * p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, SWP_OFFSET(entry));
		swap_info_put(p);
	}
}

/*
 * Check if we're the only user of a swap page,
 * when the page is locked.
 */
static int exclusive_swap_page(struct page *page)
{
	int retval = 0;
	struct swap_info_struct * p;
	swp_entry_t entry;

	entry.val = page->index;
	p = swap_info_get(entry);
	if (p) {
		/* Is the only swap cache user the cache itself? */
		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
			/* Recheck the page count with the pagecache lock held.. */
			spin_lock(&pagecache_lock);
			if (page_count(page) - !!page->buffers == 2)
				retval = 1;
			spin_unlock(&pagecache_lock);
		}
		swap_info_put(p);
	}
	return retval;
}

/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 *
 * Here "exclusive_swap_page()" does the real
 * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
int can_share_swap_page(struct page *page)
{
	int retval = 0;

	if (!PageLocked(page))
		BUG();
	switch (page_count(page)) {
	case 3:
		if (!page->buffers)
			break;
		/* Fallthrough */
	case 2:
		if (!PageSwapCache(page))
			break;
		retval = exclusive_swap_page(page);
		break;
	case 1:
		if (PageReserved(page))
			break;
		retval = 1;
	}
	return retval;
}
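
/*
 * Added note on the reference arithmetic used here and below: for a
 * swap cache page, one page_count() reference is held by the swap
 * cache itself and one by our caller, so a count of 2 (plus one more
 * when page->buffers is set, hence the "- !!page->buffers" term)
 * means no other user exists; swap_map[offset] == 1 likewise means
 * the swap cache holds the only reference to the on-disk slot.
 */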

/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	if (!PageLocked(page))
		BUG();
	if (!PageSwapCache(page))
		return 0;
	if (page_count(page) - !!page->buffers != 2)	/* 2: us + cache */
		return 0;

	entry.val = page->index;
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[SWP_OFFSET(entry)] == 1) {
		/* Recheck the page count with the pagecache lock held.. */
		spin_lock(&pagecache_lock);
		if (page_count(page) - !!page->buffers == 2) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		spin_unlock(&pagecache_lock);
	}
	swap_info_put(p);

	if (retval) {
		block_flushpage(page, 0);
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
			page = find_trylock_page(&swapper_space, entry.val);
		swap_info_put(p);
	}
	if (page) {
		page_cache_get(page);
		/* Only cache user (+us), or swap space full? Free it! */
		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		UnlockPage(page);
		page_cache_release(page);
	}
}
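
/*
 * Added note: the unuse_pte/pmd/pgd/vma/process functions below walk
 * the 2.4 three-level page tables top-down, mirroring the
 * pgd -> pmd -> pte hierarchy, and rewrite every pte that still
 * encodes the swap entry being withdrawn so that it maps the in-core
 * page instead.
 */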

/*
 * The swap entry has been read in advance; here we install the page
 * into any pte that still encodes this entry.
 *
 * Always set the resulting pte to be nowrite (the same as COW pages
 * after one process has exited).  We don't know just how many PTEs will
 * share this swap entry, so be cautious and let do_wp_page work out
 * what to do if a write is requested later.
 */
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
	pte_t *dir, swp_entry_t entry, struct page* page)
{
	pte_t pte = *dir;

	if (likely(pte_to_swp_entry(pte).val != entry.val))
		return;
	if (unlikely(pte_none(pte) || pte_present(pte)))
		return;
	get_page(page);
	set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
	swap_free(entry);
	++vma->vm_mm->rss;
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
	unsigned long address, unsigned long size,
	swp_entry_t entry, struct page* page)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	if (address >= end)
		BUG();
	do {
		unuse_pmd(vma, pmd, address, end - address, offset, entry,
			  page);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}

/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
			swp_entry_t entry, struct page* page)
{
	unsigned long start = vma->vm_start, end = vma->vm_end;

	if (start >= end)
		BUG();
	do {
		unuse_pgd(vma, pgdir, start, end - start, entry, page);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		pgdir++;
	} while (start && (start < end));
}

static void unuse_process(struct mm_struct * mm,
			swp_entry_t entry, struct page* page)
{
	struct vm_area_struct* vma;

	/*
	 * Go through process' page directory.
	 */
	spin_lock(&mm->page_table_lock);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
		unuse_vma(vma, pgd, entry, page);
	}
	spin_unlock(&mm->page_table_lock);
	return;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static int find_next_to_unuse(struct swap_info_struct *si, int prev)
{
	int max = si->max;
	int i = prev;
	int count;

	/*
	 * No need for swap_device_lock(si) here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_list_lock()).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		count = si->swap_map[i];
		if (count && count != SWAP_MAP_BAD)
			break;
	}
	return i;
}
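
/*
 * Worked example (added commentary): with max == 500 and prev == 100,
 * find_next_to_unuse() scans offsets 101..499; if none of those is in
 * use it wraps and rescans 1..100, and only when that also finds
 * nothing does it return 0 to tell try_to_unuse() the map is empty.
 */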

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	int i = 0;
	int retval = 0;
	int reset_overflow = 0;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering (now preserved by swap_out()),
	 * which clusters forked address spaces together, most recent
	 * child immediately after parent.  If we race with dup_mmap(),
	 * we very much want to resolve parent before child, otherwise
	 * we may miss some entries: using last mm would invert that.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * mmput() removes mm from mmlist before exit_mmap() and its
	 * zap_page_range().  That's not too bad, those entries are
	 * on their way out, and handled faster there than here.
	 * do_munmap() behaves similarly, taking the range out of mm's
	 * vma list before zap_page_range().  But unfortunately, when
	 * unmapping a part of a vma, it takes the whole out first,
	 * then reinserts what's left after (might even reschedule if
	 * open() method called) - so swap entries may be invisible
	 * to swapoff for a while, then reappear - but that is rare.
	 */
	while ((i = find_next_to_unuse(si, i))) {
		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = SWP_ENTRY(type, i);
		page = read_swap_cache_async(entry);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page(page);
		lock_page(page);

		/*
		 * Remove all references to entry, without blocking.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		swcount = *swap_map;
		if (swcount > 1) {
			flush_page_to_ram(page);
			if (start_mm == &init_mm)
				shmem_unuse(entry, page);
			else
				unuse_process(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *mm;

			spin_lock(&mmlist_lock);
			while (*swap_map > 1 &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				swcount = *swap_map;
				if (mm == &init_mm) {
					set_start_mm = 1;
					shmem_unuse(entry, page);
				} else
					unuse_process(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					new_start_mm = mm;
					set_start_mm = 0;
				}
			}
			atomic_inc(&new_start_mm->mm_users);
			spin_unlock(&mmlist_lock);
			mmput(start_mm);
			start_mm = new_start_mm;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			swap_list_lock();
			swap_device_lock(si);
			nr_swap_pages++;
			*swap_map = 1;
			swap_device_unlock(si);
			swap_list_unlock();
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_swap_out could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 * Note shmem_unuse already deleted its page from the
		 * swap cache.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			rw_swap_page(WRITE, page);
			lock_page(page);
		}
		if (PageSwapCache(page))
			delete_from_swap_cache(page);

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so try_to_swap_out will preserve it.
		 */
		SetPageDirty(page);
		UnlockPage(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.  Interruptible check on
		 * signal_pending() would be nice, but changes the spec?
		 */
		if (current->need_resched)
			schedule();
	}

	mmput(start_mm);
	if (reset_overflow) {
		printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
		swap_overflow = 0;
	}
	return retval;
}
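
/*
 * Added summary of the swapoff sequence below: unlink the area from
 * swap_list and clear SWP_WRITEOK so no new allocations arrive, call
 * try_to_unuse() to pull every entry back into memory, then free the
 * swap_map and drop the file/device references.  On failure the area
 * is re-inserted by priority and re-enabled, as if never touched.
 */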

asmlinkage long sys_swapoff(const char * specialfile)
{
	struct swap_info_struct * p = NULL;
	unsigned short *swap_map;
	struct nameidata nd;
	int i, type, prev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = user_path_walk(specialfile, &nd);
	if (err)
		goto out;

	lock_kernel();
	prev = -1;
	swap_list_lock();
	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
		p = swap_info + type;
		if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
// 2008.06 James. {
			printk("p:i_ino=%lu, target:i_ino=%lu.\n",
			       p->swap_file->d_inode->i_ino,
			       nd.dentry->d_inode->i_ino);
			//if (p->swap_file == nd.dentry)
			if (p->swap_file->d_inode->i_ino ==
			    nd.dentry->d_inode->i_ino)
// 2008.06 James. }
				break;
		}
		prev = type;
	}
	err = -EINVAL;
	if (type < 0) {
		swap_list_unlock();
		goto out_dput;
	}

	if (prev < 0) {
		swap_list.head = p->next;
	} else {
		swap_info[prev].next = p->next;
	}
	if (type == swap_list.next) {
		/* just pick something that's safe... */
		swap_list.next = swap_list.head;
	}
	nr_swap_pages -= p->pages;
	total_swap_pages -= p->pages;
	p->flags = SWP_USED;
	swap_list_unlock();
	unlock_kernel();
	err = try_to_unuse(type);
	lock_kernel();
	if (err) {
		/* re-insert swap space back into swap_list */
		swap_list_lock();
		for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
			if (p->prio >= swap_info[i].prio)
				break;
		p->next = i;
		if (prev < 0)
			swap_list.head = swap_list.next = p - swap_info;
		else
			swap_info[prev].next = p - swap_info;
		nr_swap_pages += p->pages;
		total_swap_pages += p->pages;
		p->flags = SWP_WRITEOK;
		swap_list_unlock();
		goto out_dput;
	}
	if (p->swap_device)
		blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
	path_release(&nd);

	swap_list_lock();
	swap_device_lock(p);
	nd.mnt = p->swap_vfsmnt;
	nd.dentry = p->swap_file;
	p->swap_vfsmnt = NULL;
	p->swap_file = NULL;
	p->swap_device = 0;
	p->max = 0;
	swap_map = p->swap_map;
	p->swap_map = NULL;
	p->flags = 0;
	swap_device_unlock(p);
	swap_list_unlock();
	vfree(swap_map);
	err = 0;

out_dput:
	unlock_kernel();
	path_release(&nd);
out:
	return err;
}

int get_swaparea_info(char *buf)
{
	char * page = (char *) __get_free_page(GFP_KERNEL);
	struct swap_info_struct *ptr = swap_info;
	int i, j, len = 0, usedswap;

	if (!page)
		return -ENOMEM;

	len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
	for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
		if ((ptr->flags & SWP_USED) && ptr->swap_map) {
			char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
						page, PAGE_SIZE);

			len += sprintf(buf + len, "%-31s ", path);

			if (!ptr->swap_device)
				len += sprintf(buf + len, "file\t\t");
			else
				len += sprintf(buf + len, "partition\t");

			usedswap = 0;
			for (j = 0; j < ptr->max; ++j)
				switch (ptr->swap_map[j]) {
					case SWAP_MAP_BAD:
					case 0:
						continue;
					default:
						usedswap++;
				}
			len += sprintf(buf + len, "%d\t%d\t%d\n",
				ptr->pages << (PAGE_SHIFT - 10),
				usedswap << (PAGE_SHIFT - 10), ptr->prio);
		}
	}
	free_page((unsigned long) page);
	return len;
}
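
/*
 * Example /proc/swaps output produced above (added commentary, values
 * illustrative):
 *
 *	Filename			Type		Size	Used	Priority
 *	/dev/hda2                       partition	136512	2460	-1
 *
 * Size and Used are in kilobytes: page counts shifted left by
 * (PAGE_SHIFT - 10).
 */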

int is_swap_partition(kdev_t dev) {
	struct swap_info_struct *ptr = swap_info;
	int i;

	for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
		if (ptr->flags & SWP_USED)
			if (ptr->swap_device == dev)
				return 1;
	}
	return 0;
}
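
/*
 * Added note: sys_swapon() below understands two on-disk header
 * formats, read from page 0 of the swap area.  Version 1
 * ("SWAP-SPACE") stores a bitmap of usable pages in the header page
 * itself, which caps the area at 8*PAGE_SIZE pages; version 2
 * ("SWAPSPACE2") stores last_page, a bad-page list and a sub-version
 * number in swap_header->info.
 */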

/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call
 */
asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
{
	struct swap_info_struct * p;
	struct nameidata nd;
	struct inode * swap_inode;
	unsigned int type;
	int i, j, prev;
	int error;
	static int least_priority = 0;
	union swap_header *swap_header = 0;
	int swap_header_version;
	int nr_good_pages = 0;
	unsigned long maxpages = 1;
	int swapfilesize;
	struct block_device *bdev = NULL;
	unsigned short *swap_map;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	lock_kernel();
	swap_list_lock();
	p = swap_info;
	for (type = 0 ; type < nr_swapfiles ; type++,p++)
		if (!(p->flags & SWP_USED))
			break;
	error = -EPERM;
	if (type >= MAX_SWAPFILES) {
		swap_list_unlock();
		goto out;
	}
	if (type >= nr_swapfiles)
		nr_swapfiles = type+1;
	p->flags = SWP_USED;
	p->swap_file = NULL;
	p->swap_vfsmnt = NULL;
	p->swap_device = 0;
	p->swap_map = NULL;
	p->lowest_bit = 0;
	p->highest_bit = 0;
	p->cluster_nr = 0;
	p->sdev_lock = SPIN_LOCK_UNLOCKED;
	p->next = -1;
	if (swap_flags & SWAP_FLAG_PREFER) {
		p->prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
	} else {
		p->prio = --least_priority;
	}
	swap_list_unlock();
	error = user_path_walk(specialfile, &nd);
	if (error)
		goto bad_swap_2;

	p->swap_file = nd.dentry;
	p->swap_vfsmnt = nd.mnt;
	swap_inode = nd.dentry->d_inode;
	error = -EINVAL;

	if (S_ISBLK(swap_inode->i_mode)) {
		kdev_t dev = swap_inode->i_rdev;
		struct block_device_operations *bdops;
		devfs_handle_t de;

		p->swap_device = dev;
		set_blocksize(dev, PAGE_SIZE);

		bd_acquire(swap_inode);
		bdev = swap_inode->i_bdev;
		de = devfs_get_handle_from_inode(swap_inode);
		bdops = devfs_get_ops(de);	/* Increments module use count */
		if (bdops) bdev->bd_op = bdops;

		error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
		devfs_put_ops(de);	/* Decrement module use count now we're safe */
		if (error)
			goto bad_swap_2;
		set_blocksize(dev, PAGE_SIZE);
		error = -ENODEV;
		if (!dev || (blk_size[MAJOR(dev)] &&
		     !blk_size[MAJOR(dev)][MINOR(dev)]))
			goto bad_swap;
		swapfilesize = 0;
		if (blk_size[MAJOR(dev)])
			swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
				>> (PAGE_SHIFT - 10);
	} else if (S_ISREG(swap_inode->i_mode))
		swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
	else
		goto bad_swap;

	error = -EBUSY;
	for (i = 0 ; i < nr_swapfiles ; i++) {
		struct swap_info_struct *q = &swap_info[i];
		if (i == type || !q->swap_file)
			continue;
		if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
			goto bad_swap;
	}

	swap_header = (void *) __get_free_page(GFP_USER);
	if (!swap_header) {
		printk("Unable to start swapping: out of memory :-)\n");
		error = -ENOMEM;
		goto bad_swap;
	}

	lock_page(virt_to_page(swap_header));
	rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);

	if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
		swap_header_version = 1;
	else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
		swap_header_version = 2;
	else {
		printk("Unable to find swap-space signature\n");
		error = -EINVAL;
		goto bad_swap;
	}

	switch (swap_header_version) {
	case 1:
		memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
		j = 0;
		p->lowest_bit = 0;
		p->highest_bit = 0;
		for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
			if (test_bit(i,(char *) swap_header)) {
				if (!p->lowest_bit)
					p->lowest_bit = i;
				p->highest_bit = i;
				maxpages = i+1;
				j++;
			}
		}
		nr_good_pages = j;
		p->swap_map = vmalloc(maxpages * sizeof(short));
		if (!p->swap_map) {
			error = -ENOMEM;
			goto bad_swap;
		}
		for (i = 1 ; i < maxpages ; i++) {
			if (test_bit(i,(char *) swap_header))
				p->swap_map[i] = 0;
			else
				p->swap_map[i] = SWAP_MAP_BAD;
		}
		break;

	case 2:
		/* Check the swap header's sub-version and the size of
		   the swap file and bad block lists */
		if (swap_header->info.version != 1) {
			printk(KERN_WARNING
			       "Unable to handle swap header version %d\n",
			       swap_header->info.version);
			error = -EINVAL;
			goto bad_swap;
		}

		p->lowest_bit = 1;
		maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
		if (maxpages > swap_header->info.last_page)
			maxpages = swap_header->info.last_page;
		p->highest_bit = maxpages - 1;

		error = -EINVAL;
		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
			goto bad_swap;

		/* OK, set up the swap map and apply the bad block list */
		if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
			error = -ENOMEM;
			goto bad_swap;
		}

		error = 0;
		memset(p->swap_map, 0, maxpages * sizeof(short));
		for (i=0; i<swap_header->info.nr_badpages; i++) {
			int page = swap_header->info.badpages[i];
			if (page <= 0 || page >= swap_header->info.last_page)
				error = -EINVAL;
			else
				p->swap_map[page] = SWAP_MAP_BAD;
		}
		nr_good_pages = swap_header->info.last_page -
				swap_header->info.nr_badpages -
				1 /* header page */;
		if (error)
			goto bad_swap;
	}

	if (swapfilesize && maxpages > swapfilesize) {
		printk(KERN_WARNING
		       "Swap area shorter than signature indicates\n");
		error = -EINVAL;
		goto bad_swap;
	}
	if (!nr_good_pages) {
		printk(KERN_WARNING "Empty swap-file\n");
		error = -EINVAL;
		goto bad_swap;
	}
	p->swap_map[0] = SWAP_MAP_BAD;
	swap_list_lock();
	swap_device_lock(p);
	p->max = maxpages;
	p->flags = SWP_WRITEOK;
	p->pages = nr_good_pages;
	nr_swap_pages += nr_good_pages;
	total_swap_pages += nr_good_pages;
	printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
	       nr_good_pages<<(PAGE_SHIFT-10), p->prio);

	/* insert swap space into swap_list: */
	prev = -1;
	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
		if (p->prio >= swap_info[i].prio) {
			break;
		}
		prev = i;
	}
	p->next = i;
	if (prev < 0) {
		swap_list.head = swap_list.next = p - swap_info;
	} else {
		swap_info[prev].next = p - swap_info;
	}
	swap_device_unlock(p);
	swap_list_unlock();
	error = 0;
	goto out;
bad_swap:
	if (bdev)
		blkdev_put(bdev, BDEV_SWAP);
bad_swap_2:
	swap_list_lock();
	swap_map = p->swap_map;
	nd.mnt = p->swap_vfsmnt;
	nd.dentry = p->swap_file;
	p->swap_device = 0;
	p->swap_file = NULL;
	p->swap_vfsmnt = NULL;
	p->swap_map = NULL;
	p->flags = 0;
	if (!(swap_flags & SWAP_FLAG_PREFER))
		++least_priority;
	swap_list_unlock();
	if (swap_map)
		vfree(swap_map);
	path_release(&nd);
out:
	if (swap_header)
		free_page((long) swap_header);
	unlock_kernel();
	return error;
}
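
/*
 * Worked example of the version 2 sizing above (added commentary):
 * with info.last_page == 65536 and info.nr_badpages == 3, the map
 * covers maxpages = min(arch limit, 65536) slots, and nr_good_pages =
 * 65536 - 3 - 1 == 65532; the "- 1" accounts for the header page,
 * which is marked SWAP_MAP_BAD so it is never allocated.
 */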

void si_swapinfo(struct sysinfo *val)
{
	unsigned int i;
	unsigned long nr_to_be_unused = 0;

	swap_list_lock();
	for (i = 0; i < nr_swapfiles; i++) {
		unsigned int j;
		if (swap_info[i].flags != SWP_USED)
			continue;
		for (j = 0; j < swap_info[i].max; ++j) {
			switch (swap_info[i].swap_map[j]) {
				case 0:
				case SWAP_MAP_BAD:
					continue;
				default:
					nr_to_be_unused++;
			}
		}
	}
	val->freeswap = nr_swap_pages + nr_to_be_unused;
	val->totalswap = total_swap_pages + nr_to_be_unused;
	swap_list_unlock();
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
 * "permanent", but will be reclaimed by the next swapoff.
 */
int swap_duplicate(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;
	int result = 0;

	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = SWP_OFFSET(entry);

	swap_device_lock(p);
	if (offset < p->max && p->swap_map[offset]) {
		if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
			p->swap_map[offset]++;
			result = 1;
		} else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
			if (swap_overflow++ < 5)
				printk(KERN_WARNING "swap_dup: swap entry overflow\n");
			p->swap_map[offset] = SWAP_MAP_MAX;
			result = 1;
		}
	}
	swap_device_unlock(p);
out:
	return result;

bad_file:
	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
	goto out;
}
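
/*
 * Added note: swap_duplicate() saturates rather than overflows - once
 * a slot's count hits SWAP_MAP_MAX it stays pinned there (the
 * rate-limited warning above fires at most five times), and such
 * "permanent" entries are only reclaimed when try_to_unuse() resets
 * them during swapoff.
 */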

/*
 * Page lock needs to be held in all cases to prevent races with
 * swap file deletion.
 */
int swap_count(struct page *page)
{
	struct swap_info_struct * p;
	unsigned long offset, type;
	swp_entry_t entry;
	int retval = 0;

	entry.val = page->index;
	if (!entry.val)
		goto bad_entry;
	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = SWP_OFFSET(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_unused;
	retval = p->swap_map[offset];
out:
	return retval;

bad_entry:
	printk(KERN_ERR "swap_count: null entry!\n");
	goto out;
bad_file:
	printk(KERN_ERR "swap_count: %s%08lx\n", Bad_file, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_count: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_unused:
	printk(KERN_ERR "swap_count: %s%08lx\n", Unused_offset, entry.val);
	goto out;
}

/*
 * Prior swap_duplicate protects against swap device deletion.
 */
void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
			kdev_t *dev, struct inode **swapf)
{
	unsigned long type;
	struct swap_info_struct *p;

	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles) {
		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
		return;
	}

	p = &swap_info[type];
	*offset = SWP_OFFSET(entry);
	if (*offset >= p->max && *offset != 0) {
		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val);
		return;
	}
	if (p->swap_map && !p->swap_map[*offset]) {
		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val);
		return;
	}
	if (!(p->flags & SWP_USED)) {
		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val);
		return;
	}

	if (p->swap_device) {
		*dev = p->swap_device;
	} else if (p->swap_file) {
		*swapf = p->swap_file->d_inode;
	} else {
		printk(KERN_ERR "rw_swap_page: no swap file or device\n");
	}
	return;
}

/*
 * swap_device_lock prevents swap_map being freed. Don't grab an extra
 * reference on the swaphandle, it doesn't matter if it becomes unused.
 */
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
	int ret = 0, i = 1 << page_cluster;
	unsigned long toff;
	struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;

	if (!page_cluster)	/* no readahead */
		return 0;
	toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
	if (!toff)		/* first page is swap header */
		toff++, i--;
	*offset = toff;

	swap_device_lock(swapdev);
	do {
		/* Don't read-ahead past the end of the swap area */
		if (toff >= swapdev->max)
			break;
		/* Don't read in free or bad pages */
		if (!swapdev->swap_map[toff])
			break;
		if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
			break;
		toff++;
		ret++;
	} while (--i);
	swap_device_unlock(swapdev);
	return ret;
}
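
/*
 * Readahead example for valid_swaphandles() above (added commentary):
 * with page_cluster == 3 the window is 8 slots aligned to a multiple
 * of 8, so a fault on offset 13 yields *offset == 8 and up to 8 valid
 * slots (8..15), stopping early at the first free or bad slot or at
 * swapdev->max; the window that would start at slot 0 instead starts
 * at slot 1, skipping the header page, and covers at most 7 slots.
 */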