/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 207796 2010-05-08 20:34:01Z alc $");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
    &page_kp);

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
#endif
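
/*
 * For reference, kproc_start() (from sys/kern/kern_kthread.c) consumes
 * each kproc_desc above at SYSINIT time, creating a kernel process that
 * runs the given function and storing the new struct proc pointer
 * through global_procpp.  The following is an illustrative paraphrase
 * of that helper, not its authoritative text:
 *
 *	void
 *	kproc_start(const void *udata)
 *	{
 *		const struct kproc_desc *kp = udata;
 *		int error;
 *
 *		error = kproc_create((void (*)(void *))kp->func, NULL,
 *		    kp->global_procpp, 0, 0, "%s", kp->arg0);
 *		if (error)
 *			panic("kproc_start: %s: error %d", kp->arg0, error);
 *	}
 */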

int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
SYSCTL_INT(_vm, OID_AUTO, max_wired,
	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");

#if !defined(NO_SWAPPING)
static void vm_pageout_map_deactivate_pages(vm_map_t, long);
static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void vm_req_vmdaemon(int req);
#endif
static void vm_pageout_page_stats(void);

static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_FICTITIOUS | PG_MARKER;
	marker->oflags = VPO_BUSY;
	marker->queue = queue;
	marker->wire_count = 1;
}

/*
 * vm_pageout_fallback_object_lock:
 *
 * Lock vm object currently associated with `m'.  VM_OBJECT_TRYLOCK is
 * known to have failed and page queue must be either PQ_ACTIVE or
 * PQ_INACTIVE.  To avoid lock order violation, unlock the page queues
 * while locking the vm object.  Use marker page to detect page queue
 * changes and maintain notion of next page on page queue.  Return
 * TRUE if no changes were detected, FALSE otherwise.  vm object is
 * locked on return.
 *
 * This function depends on both the lock portion of struct vm_object
 * and normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;
	vm_object_t object;

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);
	object = m->object;

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
	    m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_unlock(m);
	VM_OBJECT_LOCK(object);
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue &&
	    m->object == object &&
	    &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl,
	    &marker, pageq);
	return (unchanged);
}

/*
 * Lock the page while holding the page queue lock.  Use marker page
 * to detect page queue changes and maintain notion of next page on
 * page queue.  Return TRUE if no changes were detected, FALSE
 * otherwise.  The page is locked on return.  The page queue lock might
 * be dropped and reacquired.
 *
 * This function depends on normal struct vm_page being type stable.
 */
boolean_t
vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
{
	struct vm_page marker;
	boolean_t unchanged;
	u_short queue;

	vm_page_lock_assert(m, MA_NOTOWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (vm_page_trylock(m))
		return (TRUE);

	queue = m->queue;
	vm_pageout_init_marker(&marker, queue);

	TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
	vm_page_unlock_queues();
	vm_page_lock(m);
	vm_page_lock_queues();

	/* Page queue might have changed. */
	*next = TAILQ_NEXT(&marker, pageq);
	unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
	TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
	return (unchanged);
}
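
/*
 * Illustrative (non-compiled) sketch of how the lock helpers above are
 * used from a page queue scan; the real consumers are vm_pageout_scan()
 * and vm_pageout_page_stats() below:
 *
 *	vm_page_lock_queues();
 *	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
 *	    m != NULL; m = next) {
 *		next = TAILQ_NEXT(m, pageq);
 *		if (m->flags & PG_MARKER)
 *			continue;
 *		if (!vm_pageout_page_lock(m, &next)) {
 *			(The queue changed while sleeping on the page
 *			lock; "next" was recomputed via the marker, so
 *			drop this page and continue from there.)
 *			vm_page_unlock(m);
 *			continue;
 *		}
 *		(... examine or requeue m ...)
 *		vm_page_unlock(m);
 *	}
 *	vm_page_unlock_queues();
 */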

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(vm_page_t m)
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	vm_page_lock_assert(m, MA_NOTOWNED);
	vm_page_lock(m);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Can't clean the page if it's busy or held.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->oflags & VPO_BUSY))) {
		vm_page_unlock(m);
		return (0);
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if ((p->oflags & VPO_BUSY) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_lock(p);
		vm_page_lock_queues();
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock(p);
			vm_page_unlock_queues();
			ib = 0;
			break;
		}
		vm_page_unlock_queues();
		vm_page_unlock(p);
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if ((p->oflags & VPO_BUSY) || p->busy) {
			break;
		}
		vm_page_lock(p);
		vm_page_lock_queues();
		vm_page_test_dirty(p);
		if (p->dirty == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->hold_count != 0) {	/* may be undergoing I/O */
			vm_page_unlock_queues();
			vm_page_unlock(p);
			break;
		}
		vm_page_unlock_queues();
		vm_page_unlock(p);
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	vm_page_unlock(m);
	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
}
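
/*
 * A worked example of the clustering window above, assuming the default
 * vm_pageout_page_count of 16: mc[] has 32 slots and the target page m
 * starts at mc[16] (page_base).  The backward scan fills mc[15], mc[14],
 * ... (decrementing page_base) until it hits a page that cannot be
 * clustered or an alignment boundary, i.e. a pindex that is a multiple
 * of 16; the forward scan then fills mc[page_base + pageout_count],
 * mc[page_base + pageout_count + 1], ... up to the 16-page limit.  So a
 * dirty page at pindex 21 can at best yield the aligned run of pindexes
 * 16-31, which keeps clusters disk-aligned instead of centered on the
 * triggering page.
 */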

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we setup for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_remove_write(mc[i]);
	}
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count, flags, pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT(pageout_status[i] == VM_PAGER_PEND ||
		    (mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If page couldn't be paged out, then reactivate the
			 * page so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_lock(mt);
			vm_page_activate(mt);
			vm_page_unlock(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe()) {
				vm_page_lock(mt);
				vm_page_try_to_cache(mt);
				vm_page_unlock(mt);
			}
		}
	}
	return (numpagedout);
}
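
/*
 * Minimal sketch of a single-page vm_pageout_flush() call under the
 * locking protocol asserted above (object locked, page queues unlocked);
 * this is a hypothetical caller, shown only to make the contract
 * concrete:
 *
 *	vm_page_t ma[1];
 *
 *	VM_OBJECT_LOCK(object);
 *	ma[0] = m;	(m must be fully valid and belong to object)
 *	(void)vm_pageout_flush(ma, 1, 0);	(0 = no special pager flags)
 *	VM_OBJECT_UNLOCK(object);
 */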

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE ||
	    first_object->type == OBJT_SG)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		if (object->type == OBJT_PHYS || object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		p = TAILQ_FIRST(&object->memq);
		while (p != NULL) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			next = TAILQ_NEXT(p, listq);
			if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
				p = next;
				continue;
			}
			vm_page_lock(p);
			vm_page_lock_queues();
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    !pmap_page_exists_quick(pmap, p)) {
				vm_page_unlock_queues();
				vm_page_unlock(p);
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_page_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_page_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			vm_page_unlock_queues();
			vm_page_unlock(p);
			p = next;
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}
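
/*
 * A worked example of the act_count arithmetic in the loop above (also
 * used by vm_pageout_scan()), assuming the stock ACT_DECLINE (1),
 * ACT_ADVANCE (3) and ACT_MAX (64) values from vm_page.h: a page that
 * is referenced on every visit climbs by ACT_ADVANCE per scan toward
 * ACT_MAX, and once references stop it takes roughly ACT_MAX /
 * ACT_DECLINE = 64 idle scans for act_count to decay to zero and make
 * the page a deactivation candidate.  Occasionally referenced pages
 * oscillate in between, which is what makes this a pseudo-LRU rather
 * than a strict LRU.
 */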

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	vm_pageout_init_marker(&marker, PQ_INACTIVE);

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * Lock the page.
		 */
		if (!vm_pageout_page_lock(m, &next)) {
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count || (object = m->object) == NULL) {
			vm_page_unlock(m);
			vm_page_requeue(m);
			addl_page_shortage++;
			continue;
		}

		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (!VM_OBJECT_TRYLOCK(object) &&
		    (!vm_pageout_fallback_object_lock(m, &next) ||
		     m->hold_count != 0)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			addl_page_shortage++;
			continue;
		}
		if (m->busy || (m->oflags & VPO_BUSY)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			KASSERT(!pmap_page_is_mapped(m),
			    ("vm_pageout_scan: page %p is mapped", m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE);
			vm_page_unlock(m);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will be
		 * less likely to place pages back onto the inactive queue
		 * again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			VM_OBJECT_UNLOCK(object);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			vm_page_unlock(m);
			continue;
		}

		/*
		 * If the upper level VM system does not believe that the page
		 * is fully dirty, but it is mapped for write access, then we
		 * consult the pmap to see if the page's dirty status should
		 * be updated.
		 */
		if (m->dirty != VM_PAGE_BITS_ALL &&
		    (m->flags & PG_WRITEABLE) != 0) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if (pmap_is_modified(m))
				vm_page_dirty(m);
			else if (m->dirty == 0)
				pmap_remove_all(m);
		}

		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_page_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok, vfslocked = 0;
			struct vnode *vp = NULL;
			struct mount *mp = NULL;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_page_unlock(m);
				VM_OBJECT_UNLOCK(object);
				vm_page_requeue(m);
				continue;
			}

			/*
			 * The following operations may unlock
			 * vm_page_queue_mtx, invalidating the 'next'
			 * pointer.  To prevent an inordinate number
			 * of restarts we use our marker to remember
			 * our place.
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
			    m, &marker, pageq);
			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vm_page_unlock_queues();
				vm_page_unlock(m);
				vp = object->handle;
				if (vp->v_type == VREG &&
				    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
					mp = NULL;
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vm_page_lock_queues();
					goto unlock_and_continue;
				}
				KASSERT(mp != NULL,
				    ("vp %p with NULL v_mount", vp));
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
				    curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vp = NULL;
					goto unlock_and_continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock(m);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.
				 */
				if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
				    m->object != object ||
				    TAILQ_NEXT(m, pageq) != &marker) {
					vm_page_unlock(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget().  We don't move the
				 * page back onto the end of the queue,
				 * since statistics are more correct if we
				 * don't.
				 */
				if (m->busy || (m->oflags & VPO_BUSY)) {
					vm_page_unlock(m);
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held, it might
				 * be undergoing I/O, so skip it.
				 */
				if (m->hold_count) {
					vm_page_unlock(m);
					vm_page_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}
			vm_page_unlock(m);

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			vm_page_unlock_queues();
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			vm_page_lock_queues();
unlock_and_continue:
			vm_page_lock_assert(m, MA_NOTOWNED);
			VM_OBJECT_UNLOCK(object);
			if (mp != NULL) {
				vm_page_unlock_queues();
				if (vp != NULL)
					vput(vp);
				VFS_UNLOCK_GIANT(vfslocked);
				vm_object_deallocate(object);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
			    &marker, pageq);
			vm_page_lock_assert(m, MA_NOTOWNED);
			continue;
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.
	 * We nominally track the per-page activity counter and use it to
	 * locate deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		object = m->object;
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		if (!vm_pageout_page_lock(m, &next) ||
		    (object = m->object) == NULL) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * We use the page activation count stats only if the
		 * object is currently being used.
		 */
		if (actcount && (object->ref_count != 0)) {
			vm_page_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (object->ref_count == 0) {
					KASSERT(!pmap_page_is_mapped(m),
					    ("vm_pageout_scan: page %p is mapped", m));
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_req_vmdaemon(VM_SWAP_IDLE);
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target())
			vm_req_vmdaemon(VM_SWAP_NORMAL);
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.
	 * However, we avoid doing this on the first pass in order to give
	 * ourselves a chance to flush out dirty vnode-backed pages and to
	 * allow active pages to be moved to the inactive queue and
	 * reclaimed.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	     (swap_pager_full && vm_paging_target() > 0)))
		vm_pageout_oom(VM_OOM_MEM);
}

void
vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 */
		if ((p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * get the process size
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * If this process is bigger than the biggest one,
		 * remember it.
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	if (bigproc != NULL) {
		killproc(bigproc, "out of swap space");
		sched_nice(bigproc, PRIO_MIN);
		PROC_UNLOCK(bigproc);
		wakeup(&cnt.v_free_count);
	}
}
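
/*
 * Illustrative note on the size metric above: for a VM_OOM_MEM shortage
 * a candidate's badness is vmspace_swap_count(vm) +
 * vmspace_resident_count(vm), i.e. swap slots plus resident pages.  So,
 * with hypothetical numbers, a process with 30000 resident pages and
 * 5000 swapped pages scores 35000 and is preferred as a victim over one
 * with 25000 and 8000 (33000), even though the latter uses more swap.
 */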

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * some statistics accumulation still occurs during long periods with
 * no paging.  This helps the situation where paging just starts to
 * occur.
 */
static void
vm_pageout_page_stats(void)
{
	vm_object_t object;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
		    cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		if ((m->flags & PG_MARKER) != 0) {
			m = next;
			continue;
		}
		vm_page_lock_assert(m, MA_NOTOWNED);
		if (!vm_pageout_page_lock(m, &next) ||
		    (object = m->object) == NULL) {
			vm_page_unlock(m);
			m = next;
			continue;
		}
		if (!VM_OBJECT_TRYLOCK(object) &&
		    !vm_pageout_fallback_object_lock(m, &next)) {
			VM_OBJECT_UNLOCK(object);
			vm_page_unlock(m);
			m = next;
			continue;
		}

		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->oflags & VPO_BUSY) ||
		    (m->hold_count != 0)) {
			vm_page_unlock(m);
			VM_OBJECT_UNLOCK(object);
			vm_page_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_page_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_page_requeue(m);
			}
		}
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
		m = next;
	}
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout(void)
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
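
	/*
	 * A worked example of the sizing above, assuming 1 GiB of RAM with
	 * 4 KiB pages (v_page_count ~= 262144) and the historical 64 KiB
	 * MAXBSIZE: v_free_min starts at 4 + (262144 - 1024) / 200 = 1309
	 * pages, v_pageout_free_min is 2 * 65536 / 4096 + 2 = 34 pages,
	 * v_free_reserved is 16 + 34 + 262144 / 768 ~= 391 pages, so the
	 * final v_free_min is ~1700 pages (~6.6 MB) and v_free_target, the
	 * free+cache high water mark, is 4 * 1700 + 391 = 7191 pages
	 * (~28 MB).
	 */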

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		/*
		 * If we have enough free memory, wake up waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed,
				    &vm_page_queue_free_mtx, PVM, "psleep",
				    hz / 2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed,
			    &vm_page_queue_free_mtx, PVM, "psleep",
			    vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				mtx_unlock(&vm_page_queue_free_mtx);
				pass = 0;
				vm_page_lock_queues();
				vm_pageout_page_stats();
				vm_page_unlock_queues();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		mtx_unlock(&vm_page_queue_free_mtx);
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the free page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup(void)
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags;

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags)
			swapout_procs(swapout_flags);

		/*
		 * Scan the processes for exceeding their rlimits, or if a
		 * process is swapped out -- deactivate pages.
		 */
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * If this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			PROC_UNLOCK(p);
			if (vm == NULL)
				continue;

			size = vmspace_resident_count(vm);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &vm->vm_map, limit);
			}
			vmspace_free(vm);
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif			/* !defined(NO_SWAPPING) */