/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * The proverbial page-out daemon.
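 *
 * Overview: this file implements the pageout daemon itself
 * (vm_pageout), the optional whole-process swapout daemon
 * (vm_daemon, compiled out under NO_SWAPPING), and the shared
 * helpers, notably vm_pageout_scan() and vm_pageout_clean().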
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_pageout.c 132040 2004-07-12 17:45:37Z alc $");

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);

struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit;		/* Estimated number of pages deficit */
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
static int vm_max_launder = 32;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_algorithm = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0,
"Allow swapout on idle criteria"); 188#endif 189 190SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts, 191 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem"); 192 193SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 194 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 195 196static int pageout_lock_miss; 197SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, 198 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); 199 200#define VM_PAGEOUT_PAGE_COUNT 16 201int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT; 202 203int vm_page_max_wired; /* XXX max # of wired pages system-wide */ 204 205#if !defined(NO_SWAPPING) 206static void vm_pageout_map_deactivate_pages(vm_map_t, long); 207static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long); 208static void vm_req_vmdaemon(void); 209#endif 210static void vm_pageout_page_stats(void); 211 212/* 213 * vm_pageout_clean: 214 * 215 * Clean the page and remove it from the laundry. 216 * 217 * We set the busy bit to cause potential page faults on this page to 218 * block. Note the careful timing, however, the busy bit isn't set till 219 * late and we cannot do anything that will mess with the page. 220 */ 221static int 222vm_pageout_clean(m) 223 vm_page_t m; 224{ 225 vm_object_t object; 226 vm_page_t mc[2*vm_pageout_page_count]; 227 int pageout_count; 228 int ib, is, page_base; 229 vm_pindex_t pindex = m->pindex; 230 231 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 232 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 233 234 /* 235 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP 236 * with the new swapper, but we could have serious problems paging 237 * out other object types if there is insufficient memory. 238 * 239 * Unfortunately, checking free memory here is far too late, so the 240 * check has been moved up a procedural level. 241 */ 242 243 /* 244 * Don't mess with the page if it's busy, held, or special 245 */ 246 if ((m->hold_count != 0) || 247 ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) { 248 return 0; 249 } 250 251 mc[vm_pageout_page_count] = m; 252 pageout_count = 1; 253 page_base = vm_pageout_page_count; 254 ib = 1; 255 is = 1; 256 257 /* 258 * Scan object for clusterable pages. 259 * 260 * We can cluster ONLY if: ->> the page is NOT 261 * clean, wired, busy, held, or mapped into a 262 * buffer, and one of the following: 263 * 1) The page is inactive, or a seldom used 264 * active page. 265 * -or- 266 * 2) we force the issue. 267 * 268 * During heavy mmap/modification loads the pageout 269 * daemon can really fragment the underlying file 270 * due to flushing pages out of order and not trying 271 * align the clusters (which leave sporatic out-of-order 272 * holes). To solve this problem we do the reverse scan 273 * first and attempt to align our cluster, then do a 274 * forward scan if room remains. 
	 */
	object = m->object;
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(vm_page_t *mc, int count, int flags)
{
	vm_object_t object = mc[0]->object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
			mc[i], i, count));
		vm_page_io_start(mc[i]);
		pmap_page_protect(mc[i], VM_PROT_READ);
	}
	vm_page_unlock_queues();
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ?
	    VM_PAGER_PUT_SYNC : 0)),
	    pageout_status);

	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		KASSERT((mt->flags & PG_WRITEABLE) == 0,
		    ("vm_pageout_flush: page %p is not write protected", mt));
		switch (pageout_status[i]) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (vm_page_count_severe())
				vm_page_try_to_cache(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(pmap, first_object, desired)
	pmap_t pmap;
	vm_object_t first_object;
	long desired;
{
	vm_object_t backing_object, object;
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	if (first_object->type == OBJT_DEVICE || first_object->type == OBJT_PHYS)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		if (object->paging_in_progress)
			goto unlock_return;

		remove_mode = 0;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(pmap) <= desired) {
				vm_page_unlock_queues();
				goto unlock_return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(pmap, p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						pmap_remove_all(p);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p,
					    PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				pmap_remove_all(p);
			}
			p = next;
		}
		vm_page_unlock_queues();
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_LOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_UNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_UNLOCK(object);
}

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	long desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				     bigobj->resident_page_count < obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_UNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_UNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj != NULL) {
		vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
		VM_OBJECT_UNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_LOCK(obj);
				vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
				VM_OBJECT_UNLOCK(obj);
			}
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		GIANT_REQUIRED;
		vm_page_lock_queues();
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);
}
#endif		/* !defined(NO_SWAPPING) */

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
static void
vm_pageout_pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone;

	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	vm_page_unlock_queues();
	pmap_pagedaemon_waken = 0;
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
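 *
 * In outline (summarizing the code below): the inactive queue is
 * scanned first, freeing clean pages and laundering dirty ones; the
 * active queue is scanned next to refill the inactive queue; cache
 * pages are moved to the free list while v_free_count is below
 * v_free_reserved; and, on a second or later pass with both memory
 * and swap critically low, the largest eligible process is killed.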
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	struct thread *td;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;

	mtx_lock(&Giant);
	/*
	 * Decrease registered cache sizes.
	 */
	EVENTHANDLER_INVOKE(vm_lowmem, 0);
	/*
	 * We do this explicitly after the caches have been drained above.
	 */
	uma_reclaim();
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	vm_pageout_pmap_collect();

	addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
	vm_page_lock_queues();
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given that the
		 * upper level VM system knows nothing about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0 && !pmap_is_modified(m)) {
			/*
			 * Avoid a race condition: Unless write access is
			 * removed from the page, another processor could
			 * modify it before all access is removed by the call
			 * to vm_page_cache() below.  If vm_page_cache() finds
			 * that the page has been modified when it removes all
			 * access, it panics because it cannot cache dirty
			 * pages.  In principle, we could eliminate just write
			 * access here rather than all access.  In the expected
			 * case, when there are no last instant modifications
			 * to the page, removing all access will be cheaper
			 * overall.
			 */
			if ((m->flags & PG_WRITEABLE) != 0)
				pmap_remove_all(m);
		} else {
			vm_page_dirty(m);
		}

		object = m->object;
		if (!VM_OBJECT_TRYLOCK(object))
			continue;
		if (m->valid == 0) {
			/*
			 * Invalid pages can be easily freed
			 */
			vm_page_busy(m);
			pmap_remove_all(m);
			vm_page_free(m);
			cnt.v_dfree++;
			--page_shortage;
		} else if (m->dirty == 0) {
			/*
			 * Clean pages can be placed onto the cache queue.
			 * This effectively frees them.
			 */
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());
			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				VM_OBJECT_UNLOCK(object);
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;
				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				vm_page_unlock_queues();
				VI_LOCK(vp);
				VM_OBJECT_UNLOCK(object);
				if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK |
				    LK_TIMELOCK, curthread)) {
					VM_OBJECT_LOCK(object);
					vm_page_lock_queues();
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vput(); we don't move the
				 * page back onto the end of the queue so that
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					goto unlock_and_continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					goto unlock_and_continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
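			 *
			 * Concretely (restating the code below): the marker
			 * is inserted after m, vm_pageout_clean() may drop
			 * and reacquire the page queues lock while flushing
			 * a cluster, and the scan resumes from
			 * TAILQ_NEXT(&marker, pageq) rather than from a
			 * possibly stale 'next' pointer.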
			 */
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
unlock_and_continue:
			VM_OBJECT_UNLOCK(object);
			if (vp) {
				vm_page_unlock_queues();
				vput(vp);
				vn_finished_write(mp);
				vm_page_lock_queues();
			}
			continue;
		}
		VM_OBJECT_UNLOCK(object);
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_scan: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					pmap_remove_all(m);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both the cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;

		if ((m = vm_page_select_cache(cache_rover)) == NULL)
			break;
		cache_rover = (m->pc + PQ_PRIME2) & PQ_L2_MASK;
		object = m->object;
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		vm_page_busy(m);
		vm_page_free(m);
		VM_OBJECT_UNLOCK(object);
		cnt.v_dfree++;
	}
	vm_page_unlock_queues();
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
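	 *
	 * (The time_second check below posts a VM_SWAP_IDLE request
	 * to the vmdaemon at most once per second when
	 * vm_swap_idle_enabled is set.)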
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are critically low on one of RAM or swap and low on
	 * the other, kill the largest process.  However, we avoid
	 * doing this on the first pass in order to give ourselves a
	 * chance to flush out dirty vnode-backed pages and to allow
	 * active pages to be moved to the inactive queue and reclaimed.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if (pass != 0 &&
	    ((swap_pager_avail < 64 && vm_page_count_min()) ||
	     (swap_pager_full && vm_paging_target() > 0))) {
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;

			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * If this is a system or protected process, skip it.
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    (p->p_flag & P_PROTECTED) ||
			    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state,
			 * don't touch it.  Check all the threads individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			if (!vm_map_trylock_read(&p->p_vmspace->vm_map)) {
				PROC_UNLOCK(p);
				continue;
			}
			size = vmspace_swap_count(p->p_vmspace);
			vm_map_unlock_read(&p->p_vmspace->vm_map);
			size += vmspace_resident_count(p->p_vmspace);
			/*
			 * If this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			sched_nice(bigproc, PRIO_MIN);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	mtx_unlock(&Giant);
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods of time where there is no paging, some statistic
 * accumulation still occurs.  This code helps the situation where paging
 * just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		KASSERT(m->queue == PQ_ACTIVE,
		    ("vm_pageout_page_stats: page %p isn't active", m));

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				pmap_remove_all(m);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
}

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int error, pass;

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	/*
	 * v_free_reserved needs to include enough for the largest
	 * swap pager structures plus enough for any pv_entry structs
	 * when paging.
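	 *
	 * Worked example (illustrative only): with 262144 4 KB pages
	 * (1 GB of RAM), the code below computes v_free_min as
	 * 4 + (262144 - 1024) / 200 = 1309 pages, before
	 * v_free_reserved is added in.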
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;

	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		vm_page_lock_queues();
		/*
		 * If we have enough free memory, wake up waiters.  Do
		 * not clear vm_pages_needed until we reach our target;
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (!vm_paging_needed())
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
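			 *
			 * (pass decays from 2 or more to 1, and then to 0,
			 * across successive quiet runs, so one more
			 * aggressive scan is attempted before settling back
			 * to normal operation.)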
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = msleep(&vm_pages_needed, &vm_page_queue_mtx, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				pass = 0;
				vm_pageout_page_stats();
				vm_page_unlock_queues();
				continue;
			}
		}
		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_page_unlock_queues();
		vm_pageout_scan(pass);
	}
}

/*
 * Unless the page queue lock is held by the caller, this function
 * should be regarded as advisory.  Specifically, the caller should
 * not msleep() on &cnt.v_free_count following this function unless
 * the page queue lock is held until the msleep() is performed.
 */
void
pagedaemon_wakeup()
{

	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	int breakout;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * Scan the processes for those exceeding their rlimits or,
		 * if a process is swapped out, deactivate pages.
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td)) {
					breakout = 1;
					break;
				}
			}
			mtx_unlock_spin(&sched_lock);
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out; set the limit to nothing (this will
			 * force a swap-out.)
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			PROC_UNLOCK(p);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif		/* !defined(NO_SWAPPING) */