vm_pageout.c revision 223729
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */
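/*
 * Overview (editorial note, not in the original sources): the
 * "pagedaemon" kernel process defined below reclaims memory by scanning
 * the inactive and active page queues (vm_pageout_scan()), laundering
 * dirty pages in clusters (vm_pageout_clean()/vm_pageout_flush()), and,
 * as a last resort, killing the largest process (vm_pageout_oom()).
 * Unless NO_SWAPPING is defined, a second process, "vmdaemon", swaps
 * out whole processes and enforces resident-set limits (vm_daemon()).
 */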

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 223729 2011-07-02 23:34:47Z alc $");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif

int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

/*
 * Initialize a dummy page for marking the caller's place in the specified
 * paging queue.  In principle, this function only needs to set the flag
 * PG_MARKER.  Nonetheless, it sets the flag VPO_BUSY and initializes the hold
 * count to one as safety precautions.
 */
static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->oflags = VPO_BUSY;
	marker->queue = queue;
	marker->hold_count = 1;
}
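/*
 * Illustrative note (not in the original sources): a marker initialized
 * above is used by the scan code below roughly as follows --
 *
 *	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
 *	... drop and reacquire locks, possibly sleeping ...
 *	next = TAILQ_NEXT(&marker, pageq);
 *	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
 *
 * Because PG_MARKER pages are skipped by every scan, the marker safely
 * records the caller's position while the page queue lock is dropped.
 */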

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
	    m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_unlock(m);
	VM_OBJECT_LOCK(object);
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
	    &marker, pageq);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
	return (unchanged);
}
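/*
 * Illustrative note (not in the original sources): callers treat a
 * FALSE return as "the queue shifted under us" and fall back to the
 * position saved in *next; the page is locked either way, so the scan
 * loops below do, for example:
 *
 *	if (!vm_pageout_page_lock(m, &next)) {
 *		vm_page_unlock(m);
 *		m = next;
 *		continue;
 *	}
 */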

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set until
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count], pb, ps;
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
	    ("vm_pageout_clean: page %p is busy", m));
	KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
	vm_page_unlock(m);

	mc[vm_pageout_page_count] = pb = ps = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_prev(pb)) == NULL ||
		    (p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			ib = 0;
			break;
		}
		vm_page_unlock(p);
		mc[--page_base] = pb = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_next(ps)) == NULL ||
		    (p->oflags & VPO_BUSY) != 0 || p->busy != 0)
			break;
		vm_page_lock(p);
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock(p);
		mc[page_base + pageout_count] = ps = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL));
}
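/*
 * Worked example (not in the original sources), assuming the default
 * vm_pageout_page_count of 16: for a dirty page at pindex 21, mc[] has
 * 32 slots and the page starts at mc[16].  The reverse scan gathers
 * pindexes 20, 19, 18, 17, and 16, then stops, because with ib == 6
 * (pindex - (ib - 1)) == 16 is a multiple of 16 -- an aligned cluster
 * boundary.  The forward scan then extends the cluster from pindex 22
 * upward until 16 pages are collected or a non-clusterable page is hit.
 */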

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 *
 * Returned runlen is the count of pages between mreq and first
 * page after mreq with status VM_PAGER_AGAIN.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i, runlen;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	runlen = count - mreq;
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    (mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			break;
		case VM_PAGER_AGAIN:
			if (i >= mreq && i - mreq < runlen)
				runlen = i - mreq;
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	if (prunlen != NULL)
		*prunlen = runlen;
	return (numpagedout);
}
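/*
 * Worked example for the runlen computation above (not in the original
 * sources): with count == 8 and mreq == 2, runlen starts at 6.  If the
 * pager reports VM_PAGER_AGAIN for index 5 only, runlen becomes
 * 5 - 2 == 3: the three pages at indexes 2..4, starting at mreq, were
 * disposed of before the first "try again" page.
 */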

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target
 * requirements.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p;
	int actcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		if (object->type == OBJT_PHYS || object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(p, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
				continue;
			PCPU_INC(cnt.v_pdpages);
			vm_page_lock(p);
			if (p->wire_count != 0 || p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock(p);
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if ((p->flags & PG_REFERENCED) != 0) {
				if (actcount == 0)
					actcount = 1;
				vm_page_lock_queues();
				vm_page_flag_clear(p, PG_REFERENCED);
				vm_page_unlock_queues();
			}
			if (p->queue != PQ_ACTIVE && actcount != 0) {
				vm_page_activate(p);
				p->act_count += actcount;
			} else if (p->queue == PQ_ACTIVE) {
				if (actcount == 0) {
					p->act_count -= min(p->act_count,
					    ACT_DECLINE);
					if (!remove_mode &&
					    (vm_pageout_algorithm ||
					    p->act_count == 0)) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_lock_queues();
						vm_page_requeue(p);
						vm_page_unlock_queues();
					}
				} else {
					vm_page_activate(p);
					if (p->act_count < ACT_MAX -
					    ACT_ADVANCE)
						p->act_count += ACT_ADVANCE;
					vm_page_lock_queues();
					vm_page_requeue(p);
					vm_page_unlock_queues();
				}
			} else if (p->queue == PQ_INACTIVE)
				pmap_remove_all(p);
			vm_page_unlock(p);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0 && nothingwired) {
		tmpe = map->header.next;
		while (tmpe != &map->header) {
			pmap_remove(vm_map_pmap(map), tmpe->start, tmpe->end);
			tmpe = tmpe->next;
		}
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */
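/*
 * Illustrative overview (not in the original sources) of the scan that
 * follows: each pass first invokes the vm_lowmem event handlers and
 * drains the UMA caches, then walks the inactive queue freeing, caching,
 * or laundering pages until the shortage is met, then walks the active
 * queue moving idle pages to the inactive queue, and finally considers
 * waking the vmdaemon or invoking the OOM killer.
 */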

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	vm_pageout_init_marker(&marker, PQ_INACTIVE);

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE)
			goto rescan0;

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * Lock the page.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_page_unlock(m);
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
			m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system knows nothing about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			vm_page_unlock(m);
			m->act_count += actcount + ACT_ADVANCE;
			VM_OBJECT_UNLOCK(object);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			vm_page_unlock(m);
			m->act_count += actcount + ACT_ADVANCE + 1;
			VM_OBJECT_UNLOCK(object);
			continue;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    (m->flags & PG_WRITEABLE) != 0) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_page_unlock(m);
				VM_OBJECT_UNLOCK(object);
				vm_page_requeue(m);
				continue;
			}

			/*
			 * Following operations may unlock
			 * vm_page_queue_mtx, invalidating the 'next'
			 * pointer.  To prevent an inordinate number
			 * of restarts we use our marker to remember
			 * our place.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
			    m, &marker, pageq);
			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock, we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock_queues();
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vm_page_lock_queues();
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock(m);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, pageq) != &marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().
				 * We do not move the page back onto the end
				 * of the queue, which keeps the statistics
				 * more accurate.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_page_unlock(m);
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_unlock_queues();
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			vm_page_lock_queues();
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				vm_page_unlock_queues();
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
			    &marker, pageq);
			vm_page_lock_assert(m, MA_NOTOWNED);
			continue;
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (object->ref_count == 0) {
					KASSERT(!pmap_page_is_mapped(m),
					    ("vm_pageout_scan: page %p is mapped", m));
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	     (swap_pager_full && vm_paging_target() > 0)))
		vm_pageout_oom(VM_OOM_MEM);
}

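/*
 * Descriptive note (not in the original sources): vm_pageout_oom()
 * selects a victim by walking the process list and scoring each
 * eligible process by its swap usage, plus its resident set size when
 * the shortage is VM_OOM_MEM.  System, protected, killed, and low-pid
 * processes are exempt, and a process with any thread in a non-running
 * state (neither on a run queue, running, sleeping, nor suspended) is
 * skipped.  The largest process found is killed and reniced to PRIO_MIN.
 */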
void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if (p->p_state != PRS_NORMAL ||
		    (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td) &&
			    !TD_IS_SUSPENDED(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * get the process size
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * if this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats(void)
{
	vm_object_t object;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}
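	/*
	 * Worked example for the partial scan above (not in the original
	 * sources): tpcount scales vm_pageout_stats_max by the fraction of
	 * physical pages that are active.  If half of all pages are on the
	 * active queue, a partial scan visits at most half of
	 * vm_pageout_stats_max pages; only when fullintervalcount reaches
	 * vm_pageout_full_stats_interval is the entire active queue scanned.
	 */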
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		vm_page_lock_assert(m, MA_NOTOWNED);
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;
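	/*
	 * Worked example (not in the original sources), assuming a 4 KB
	 * PAGE_SIZE, a 64 KB MAXBSIZE, and 1 GB of RAM (v_page_count ==
	 * 262144): v_free_min starts at 4 + 261120 / 200 == 1309,
	 * v_pageout_free_min is 32 + 2 == 34, and v_free_reserved is
	 * 16 + 34 + 341 == 391, so v_free_min becomes 1700 pages.  With
	 * most pages free at boot, v_free_target is 4 * 1700 + 391 ==
	 * 7191 pages, roughly 28 MB.
	 */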
	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed,
			    &vm_page_queue_free_mtx, PVM, "psleep",
			    vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				mtx_unlock(&vm_page_queue_free_mtx);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup(void)
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}
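/*
 * Descriptive note (not in the original sources): vm_daemon() below
 * sleeps until vm_req_vmdaemon() wakes it (or, with RACCT, at least
 * once per second), swaps out whole processes when requested, and then
 * walks the process list deactivating pages in any address space whose
 * resident set exceeds its RLIMIT_RSS or RACCT_RSS limit.
 */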
static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
	uint64_t rsize, ravailable;

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
#ifdef RACCT
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
#else
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
#endif
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out; set the limit to nothing (this will
			 * force a swap-out.)
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
			rsize = IDX_TO_OFF(size);
			PROC_LOCK(p);
			racct_set(p, RACCT_RSS, rsize);
			ravailable = racct_get_available(p, RACCT_RSS);
			PROC_UNLOCK(p);
			if (rsize > ravailable) {
				/*
				 * Don't be overly aggressive; this might be
				 * an innocent process, and the limit could've
				 * been exceeded by some memory hog.  Don't
				 * try to deactivate more than 1/4th of the
				 * process' resident set size.
				 */
				if (attempts <= 8) {
					if (ravailable < rsize - (rsize / 4))
						ravailable = rsize - (rsize / 4);
				}
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, OFF_TO_IDX(ravailable));
				/* Update RSS usage after paging out. */
				size = vmspace_resident_count(vm);
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				racct_set(p, RACCT_RSS, rsize);
				PROC_UNLOCK(p);
				if (rsize > ravailable)
					tryagain = 1;
			}
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10)
			goto again;
	}
}
#endif			/* !defined(NO_SWAPPING) */