/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

#include <typedefs.h>
#include <bcmdefs.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_cold_page(page, 0);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void BCMFASTPATH_HOST put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
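
/*
 * Illustrative sketch only (not part of the upstream file): how a caller
 * that holds one reference per page might hand a batch back through
 * put_pages_list() above.  The function name and the page-array parameter
 * are hypothetical and are not used anywhere else in this file.
 */
static void __maybe_unused example_put_page_batch(struct page **pages, int nr)
{
	LIST_HEAD(list);
	int i;

	/* Thread the pages together on page->lru, as put_pages_list() expects. */
	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &list);

	/* Drops the reference on every page and leaves the list empty. */
	put_pages_list(&list);
}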

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) &&
				!PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);
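
/*
 * Illustrative sketch only (not part of the upstream file): the usual way a
 * newly allocated page-cache page is fed into the deferred LRU addition
 * above.  The function name is hypothetical; real callers normally go
 * through helpers such as lru_cache_add_file()/lru_cache_add_anon() from
 * <linux/swap.h>, which boil down to the same __lru_cache_add() call.
 */
static void __maybe_unused example_add_new_file_page(struct page *page)
{
	/*
	 * The page is parked in this CPU's lru_add_pvecs pagevec and only
	 * becomes visible on the zone's inactive file LRU once the pagevec
	 * fills up or lru_add_drain() runs.
	 */
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}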

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
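
/*
 * Illustrative sketch only (not part of the upstream file): callers that
 * need recently added pages to actually be on the zone LRU lists (rather
 * than parked in per-cpu pagevecs), e.g. before page migration or mlock,
 * drain the pagevecs on all CPUs first.  The function name is hypothetical.
 */
static int __maybe_unused example_flush_lru_pagevecs(void)
{
	/* Runs lru_add_drain() on every CPU via the workqueue; 0 on success. */
	return lru_add_drain_all();
}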

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);
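
/*
 * Illustrative sketch only (not part of the upstream file): batching page
 * releases through a pagevec so that release_pages() can amortise the
 * zone->lru_lock acquisitions.  The function name is hypothetical;
 * pagevec_add() and pagevec_release() are the inline helpers from
 * <linux/pagevec.h>, and each page is assumed to carry a reference that
 * the caller wants to drop.
 */
static void __maybe_unused example_release_batched(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < nr; i++) {
		/* pagevec_add() returns 0 once the pagevec is full: drain it. */
		if (!pagevec_add(&pvec, pages[i]))
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}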

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more
	 */
}
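
/*
 * Illustrative sketch only (not part of the upstream file): the common
 * consumer pattern for pagevec_lookup() above.  A caller walks an
 * address_space in batches of up to PAGEVEC_SIZE pages and then drops the
 * references the lookup took.  The function name and the empty "visit"
 * step are hypothetical.
 */
static void __maybe_unused example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* Visit @page here; remember where to resume. */
			index = page->index + 1;
		}
		pagevec_release(&pvec);
	}
}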