vm_pageout.c, FreeBSD revision 74927 (head/sys/vm/vm_pageout.c, 2001-03-28)
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91 41 * 42 * 43 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 44 * All rights reserved. 45 * 46 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 47 * 48 * Permission to use, copy, modify and distribute this software and 49 * its documentation is hereby granted, provided that both the copyright 50 * notice and this permission notice appear in all copies of the 51 * software, derivative works or modified versions, and any portions 52 * thereof, and that both notices appear in supporting documentation. 53 * 54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 57 * 58 * Carnegie Mellon requests users of this software to return to 59 * 60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 61 * School of Computer Science 62 * Carnegie Mellon University 63 * Pittsburgh PA 15213-3890 64 * 65 * any improvements or extensions that they make and grant Carnegie the 66 * rights to redistribute these changes. 67 * 68 * $FreeBSD: head/sys/vm/vm_pageout.c 74927 2001-03-28 11:52:56Z jhb $ 69 */ 70 71/* 72 * The proverbial page-out daemon. 
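 *
 * A rough map of this file:
 *
 *	vm_pageout()		the daemon's main loop: sleep until woken,
 *				then call vm_pageout_scan().
 *	vm_pageout_scan()	scan the inactive queue (freeing, caching
 *				or laundering pages), then the active
 *				queue (deactivating idle pages), and as a
 *				last resort kill the largest process when
 *				both memory and swap are exhausted.
 *	vm_pageout_clean()	cluster dirty pages around a victim page
 *	vm_pageout_flush()	and write them out through the pager.
 *	vm_pageout_page_stats()	keep activity statistics fresh during
 *				periods when no paging is taking place.
 *	vm_daemon()		enforce RSS limits and handle
 *				whole-process swapout requests.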
73 */ 74 75#include "opt_vm.h" 76#include <sys/param.h> 77#include <sys/systm.h> 78#include <sys/kernel.h> 79#include <sys/proc.h> 80#include <sys/kthread.h> 81#include <sys/ktr.h> 82#include <sys/resourcevar.h> 83#include <sys/signalvar.h> 84#include <sys/vnode.h> 85#include <sys/vmmeter.h> 86#include <sys/sx.h> 87#include <sys/sysctl.h> 88 89#include <vm/vm.h> 90#include <vm/vm_param.h> 91#include <sys/lock.h> 92#include <vm/vm_object.h> 93#include <vm/vm_page.h> 94#include <vm/vm_map.h> 95#include <vm/vm_pageout.h> 96#include <vm/vm_pager.h> 97#include <vm/vm_zone.h> 98#include <vm/swap_pager.h> 99#include <vm/vm_extern.h> 100 101#include <machine/mutex.h> 102 103/* 104 * System initialization 105 */ 106 107/* the kernel process "vm_pageout"*/ 108static void vm_pageout __P((void)); 109static int vm_pageout_clean __P((vm_page_t)); 110static void vm_pageout_scan __P((int pass)); 111static int vm_pageout_free_page_calc __P((vm_size_t count)); 112struct proc *pageproc; 113 114static struct kproc_desc page_kp = { 115 "pagedaemon", 116 vm_pageout, 117 &pageproc 118}; 119SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp) 120 121#if !defined(NO_SWAPPING) 122/* the kernel process "vm_daemon"*/ 123static void vm_daemon __P((void)); 124static struct proc *vmproc; 125 126static struct kproc_desc vm_kp = { 127 "vmdaemon", 128 vm_daemon, 129 &vmproc 130}; 131SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp) 132#endif 133 134 135int vm_pages_needed=0; /* Event on which pageout daemon sleeps */ 136int vm_pageout_deficit=0; /* Estimated number of pages deficit */ 137int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */ 138 139#if !defined(NO_SWAPPING) 140static int vm_pageout_req_swapout; /* XXX */ 141static int vm_daemon_needed; 142#endif 143extern int vm_swap_size; 144static int vm_max_launder = 32; 145static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0; 146static int vm_pageout_full_stats_interval = 0; 147static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0; 148static int defer_swap_pageouts=0; 149static int disable_swap_pageouts=0; 150 151#if defined(NO_SWAPPING) 152static int vm_swap_enabled=0; 153static int vm_swap_idle_enabled=0; 154#else 155static int vm_swap_enabled=1; 156static int vm_swap_idle_enabled=0; 157#endif 158 159SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm, 160 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt"); 161 162SYSCTL_INT(_vm, OID_AUTO, max_launder, 163 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout"); 164 165SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max, 166 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length"); 167 168SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval, 169 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan"); 170 171SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval, 172 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan"); 173 174SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max, 175 CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented"); 176 177#if defined(NO_SWAPPING) 178SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 179 CTLFLAG_RD, &vm_swap_enabled, 0, ""); 180SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, 181 CTLFLAG_RD, &vm_swap_idle_enabled, 0, ""); 182#else 183SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, 184 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout"); 185SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, 186 
CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria"); 187#endif 188 189SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts, 190 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem"); 191 192SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts, 193 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages"); 194 195#define VM_PAGEOUT_PAGE_COUNT 16 196int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT; 197 198int vm_page_max_wired; /* XXX max # of wired pages system-wide */ 199 200#if !defined(NO_SWAPPING) 201typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int)); 202static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t)); 203static freeer_fcn_t vm_pageout_object_deactivate_pages; 204static void vm_req_vmdaemon __P((void)); 205#endif 206static void vm_pageout_page_stats(void); 207 208/* 209 * vm_pageout_clean: 210 * 211 * Clean the page and remove it from the laundry. 212 * 213 * We set the busy bit to cause potential page faults on this page to 214 * block. Note the careful timing, however, the busy bit isn't set till 215 * late and we cannot do anything that will mess with the page. 216 */ 217 218static int 219vm_pageout_clean(m) 220 vm_page_t m; 221{ 222 register vm_object_t object; 223 vm_page_t mc[2*vm_pageout_page_count]; 224 int pageout_count; 225 int ib, is, page_base; 226 vm_pindex_t pindex = m->pindex; 227 228 object = m->object; 229 230 /* 231 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP 232 * with the new swapper, but we could have serious problems paging 233 * out other object types if there is insufficient memory. 234 * 235 * Unfortunately, checking free memory here is far too late, so the 236 * check has been moved up a procedural level. 237 */ 238 239 /* 240 * Don't mess with the page if it's busy, held, or special 241 */ 242 if ((m->hold_count != 0) || 243 ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) { 244 return 0; 245 } 246 247 mc[vm_pageout_page_count] = m; 248 pageout_count = 1; 249 page_base = vm_pageout_page_count; 250 ib = 1; 251 is = 1; 252 253 /* 254 * Scan object for clusterable pages. 255 * 256 * We can cluster ONLY if: ->> the page is NOT 257 * clean, wired, busy, held, or mapped into a 258 * buffer, and one of the following: 259 * 1) The page is inactive, or a seldom used 260 * active page. 261 * -or- 262 * 2) we force the issue. 263 * 264 * During heavy mmap/modification loads the pageout 265 * daemon can really fragment the underlying file 266 * due to flushing pages out of order and not trying 267 * align the clusters (which leave sporatic out-of-order 268 * holes). To solve this problem we do the reverse scan 269 * first and attempt to align our cluster, then do a 270 * forward scan if room remains. 271 */ 272 273more: 274 while (ib && pageout_count < vm_pageout_page_count) { 275 vm_page_t p; 276 277 if (ib > pindex) { 278 ib = 0; 279 break; 280 } 281 282 if ((p = vm_page_lookup(object, pindex - ib)) == NULL) { 283 ib = 0; 284 break; 285 } 286 if (((p->queue - p->pc) == PQ_CACHE) || 287 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 288 ib = 0; 289 break; 290 } 291 vm_page_test_dirty(p); 292 if ((p->dirty & p->valid) == 0 || 293 p->queue != PQ_INACTIVE || 294 p->wire_count != 0 || 295 p->hold_count != 0) { 296 ib = 0; 297 break; 298 } 299 mc[--page_base] = p; 300 ++pageout_count; 301 ++ib; 302 /* 303 * alignment boundry, stop here and switch directions. Do 304 * not clear ib. 
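 *
 * For example, with vm_pageout_page_count at its default of 16 and
 * a starting pindex of 37, the reverse scan collects pages 36, 35,
 * ... and stops after adding page 32, because (pindex - (ib - 1))
 * is then a multiple of 16.  The low end of the cluster thus lands
 * on a 16-page boundary and the forward scan fills out the rest.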
305 */ 306 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0) 307 break; 308 } 309 310 while (pageout_count < vm_pageout_page_count && 311 pindex + is < object->size) { 312 vm_page_t p; 313 314 if ((p = vm_page_lookup(object, pindex + is)) == NULL) 315 break; 316 if (((p->queue - p->pc) == PQ_CACHE) || 317 (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { 318 break; 319 } 320 vm_page_test_dirty(p); 321 if ((p->dirty & p->valid) == 0 || 322 p->queue != PQ_INACTIVE || 323 p->wire_count != 0 || 324 p->hold_count != 0) { 325 break; 326 } 327 mc[page_base + pageout_count] = p; 328 ++pageout_count; 329 ++is; 330 } 331 332 /* 333 * If we exhausted our forward scan, continue with the reverse scan 334 * when possible, even past a page boundry. This catches boundry 335 * conditions. 336 */ 337 if (ib && pageout_count < vm_pageout_page_count) 338 goto more; 339 340 /* 341 * we allow reads during pageouts... 342 */ 343 return vm_pageout_flush(&mc[page_base], pageout_count, 0); 344} 345 346/* 347 * vm_pageout_flush() - launder the given pages 348 * 349 * The given pages are laundered. Note that we setup for the start of 350 * I/O ( i.e. busy the page ), mark it read-only, and bump the object 351 * reference count all in here rather then in the parent. If we want 352 * the parent to do more sophisticated things we may have to change 353 * the ordering. 354 */ 355 356int 357vm_pageout_flush(mc, count, flags) 358 vm_page_t *mc; 359 int count; 360 int flags; 361{ 362 register vm_object_t object; 363 int pageout_status[count]; 364 int numpagedout = 0; 365 int i; 366 367 /* 368 * Initiate I/O. Bump the vm_page_t->busy counter and 369 * mark the pages read-only. 370 * 371 * We do not have to fixup the clean/dirty bits here... we can 372 * allow the pager to do it after the I/O completes. 373 * 374 * NOTE! mc[i]->dirty may be partial or fragmented due to an 375 * edge case with file fragments. 376 */ 377 378 for (i = 0; i < count; i++) { 379 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count)); 380 vm_page_io_start(mc[i]); 381 vm_page_protect(mc[i], VM_PROT_READ); 382 } 383 384 object = mc[0]->object; 385 vm_object_pip_add(object, count); 386 387 vm_pager_put_pages(object, mc, count, 388 (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)), 389 pageout_status); 390 391 for (i = 0; i < count; i++) { 392 vm_page_t mt = mc[i]; 393 394 switch (pageout_status[i]) { 395 case VM_PAGER_OK: 396 numpagedout++; 397 break; 398 case VM_PAGER_PEND: 399 numpagedout++; 400 break; 401 case VM_PAGER_BAD: 402 /* 403 * Page outside of range of object. Right now we 404 * essentially lose the changes by pretending it 405 * worked. 406 */ 407 pmap_clear_modify(mt); 408 vm_page_undirty(mt); 409 break; 410 case VM_PAGER_ERROR: 411 case VM_PAGER_FAIL: 412 /* 413 * If page couldn't be paged out, then reactivate the 414 * page so it doesn't clog the inactive list. (We 415 * will try paging out it again later). 416 */ 417 vm_page_activate(mt); 418 break; 419 case VM_PAGER_AGAIN: 420 break; 421 } 422 423 /* 424 * If the operation is still going, leave the page busy to 425 * block all other accesses. Also, leave the paging in 426 * progress indicator set so that we don't attempt an object 427 * collapse. 
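 *
 * For pages that report VM_PAGER_PEND, the pager's I/O-completion
 * path (for example the swap pager's async-done handler) is the one
 * expected to finish the busy state and drop the paging-in-progress
 * count once the write actually completes.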
428 */ 429 if (pageout_status[i] != VM_PAGER_PEND) { 430 vm_object_pip_wakeup(object); 431 vm_page_io_finish(mt); 432 if (!vm_page_count_severe() || !vm_page_try_to_cache(mt)) 433 vm_page_protect(mt, VM_PROT_READ); 434 } 435 } 436 return numpagedout; 437} 438 439#if !defined(NO_SWAPPING) 440/* 441 * vm_pageout_object_deactivate_pages 442 * 443 * deactivate enough pages to satisfy the inactive target 444 * requirements or if vm_page_proc_limit is set, then 445 * deactivate all of the pages in the object and its 446 * backing_objects. 447 * 448 * The object and map must be locked. 449 */ 450static void 451vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only) 452 vm_map_t map; 453 vm_object_t object; 454 vm_pindex_t desired; 455 int map_remove_only; 456{ 457 register vm_page_t p, next; 458 int rcount; 459 int remove_mode; 460 int s; 461 462 if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS) 463 return; 464 465 while (object) { 466 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 467 return; 468 if (object->paging_in_progress) 469 return; 470 471 remove_mode = map_remove_only; 472 if (object->shadow_count > 1) 473 remove_mode = 1; 474 /* 475 * scan the objects entire memory queue 476 */ 477 rcount = object->resident_page_count; 478 p = TAILQ_FIRST(&object->memq); 479 while (p && (rcount-- > 0)) { 480 int actcount; 481 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 482 return; 483 next = TAILQ_NEXT(p, listq); 484 cnt.v_pdpages++; 485 if (p->wire_count != 0 || 486 p->hold_count != 0 || 487 p->busy != 0 || 488 (p->flags & (PG_BUSY|PG_UNMANAGED)) || 489 !pmap_page_exists(vm_map_pmap(map), p)) { 490 p = next; 491 continue; 492 } 493 494 actcount = pmap_ts_referenced(p); 495 if (actcount) { 496 vm_page_flag_set(p, PG_REFERENCED); 497 } else if (p->flags & PG_REFERENCED) { 498 actcount = 1; 499 } 500 501 if ((p->queue != PQ_ACTIVE) && 502 (p->flags & PG_REFERENCED)) { 503 vm_page_activate(p); 504 p->act_count += actcount; 505 vm_page_flag_clear(p, PG_REFERENCED); 506 } else if (p->queue == PQ_ACTIVE) { 507 if ((p->flags & PG_REFERENCED) == 0) { 508 p->act_count -= min(p->act_count, ACT_DECLINE); 509 if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) { 510 vm_page_protect(p, VM_PROT_NONE); 511 vm_page_deactivate(p); 512 } else { 513 s = splvm(); 514 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq); 515 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq); 516 splx(s); 517 } 518 } else { 519 vm_page_activate(p); 520 vm_page_flag_clear(p, PG_REFERENCED); 521 if (p->act_count < (ACT_MAX - ACT_ADVANCE)) 522 p->act_count += ACT_ADVANCE; 523 s = splvm(); 524 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq); 525 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq); 526 splx(s); 527 } 528 } else if (p->queue == PQ_INACTIVE) { 529 vm_page_protect(p, VM_PROT_NONE); 530 } 531 p = next; 532 } 533 object = object->backing_object; 534 } 535 return; 536} 537 538/* 539 * deactivate some number of pages in a map, try to do it fairly, but 540 * that is really hard to do. 541 */ 542static void 543vm_pageout_map_deactivate_pages(map, desired) 544 vm_map_t map; 545 vm_pindex_t desired; 546{ 547 vm_map_entry_t tmpe; 548 vm_object_t obj, bigobj; 549 550 if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) { 551 return; 552 } 553 554 bigobj = NULL; 555 556 /* 557 * first, search out the biggest object, and try to free pages from 558 * that. 
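 *
 * Working on the single largest object first recovers the most
 * memory for the least scanning; the loop further below only visits
 * the remaining objects if the pmap's resident count is still above
 * the target.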
559 */ 560 tmpe = map->header.next; 561 while (tmpe != &map->header) { 562 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 563 obj = tmpe->object.vm_object; 564 if ((obj != NULL) && (obj->shadow_count <= 1) && 565 ((bigobj == NULL) || 566 (bigobj->resident_page_count < obj->resident_page_count))) { 567 bigobj = obj; 568 } 569 } 570 tmpe = tmpe->next; 571 } 572 573 if (bigobj) 574 vm_pageout_object_deactivate_pages(map, bigobj, desired, 0); 575 576 /* 577 * Next, hunt around for other pages to deactivate. We actually 578 * do this search sort of wrong -- .text first is not the best idea. 579 */ 580 tmpe = map->header.next; 581 while (tmpe != &map->header) { 582 if (pmap_resident_count(vm_map_pmap(map)) <= desired) 583 break; 584 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { 585 obj = tmpe->object.vm_object; 586 if (obj) 587 vm_pageout_object_deactivate_pages(map, obj, desired, 0); 588 } 589 tmpe = tmpe->next; 590 }; 591 592 /* 593 * Remove all mappings if a process is swapped out, this will free page 594 * table pages. 595 */ 596 if (desired == 0) 597 pmap_remove(vm_map_pmap(map), 598 VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS); 599 vm_map_unlock(map); 600 return; 601} 602#endif 603 604/* 605 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore 606 * to vnode deadlocks. We only do it for OBJT_DEFAULT and OBJT_SWAP objects 607 * which we know can be trivially freed. 608 */ 609 610void 611vm_pageout_page_free(vm_page_t m) { 612 vm_object_t object = m->object; 613 int type = object->type; 614 615 if (type == OBJT_SWAP || type == OBJT_DEFAULT) 616 vm_object_reference(object); 617 vm_page_busy(m); 618 vm_page_protect(m, VM_PROT_NONE); 619 vm_page_free(m); 620 if (type == OBJT_SWAP || type == OBJT_DEFAULT) 621 vm_object_deallocate(object); 622} 623 624/* 625 * vm_pageout_scan does the dirty work for the pageout daemon. 626 */ 627static void 628vm_pageout_scan(int pass) 629{ 630 vm_page_t m, next; 631 struct vm_page marker; 632 int save_page_shortage; 633 int save_inactive_count; 634 int page_shortage, maxscan, pcount; 635 int addl_page_shortage, addl_page_shortage_init; 636 struct proc *p, *bigproc; 637 vm_offset_t size, bigsize; 638 vm_object_t object; 639 int actcount; 640 int vnodes_skipped = 0; 641 int maxlaunder; 642 int s; 643 644 /* 645 * Do whatever cleanup that the pmap code can. 646 */ 647 pmap_collect(); 648 649 addl_page_shortage_init = vm_pageout_deficit; 650 vm_pageout_deficit = 0; 651 652 /* 653 * Calculate the number of pages we want to either free or move 654 * to the cache. 655 */ 656 page_shortage = vm_paging_target() + addl_page_shortage_init; 657 save_page_shortage = page_shortage; 658 save_inactive_count = cnt.v_inactive_count; 659 660 /* 661 * Initialize our marker 662 */ 663 bzero(&marker, sizeof(marker)); 664 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER; 665 marker.queue = PQ_INACTIVE; 666 marker.wire_count = 1; 667 668 /* 669 * Start scanning the inactive queue for pages we can move to the 670 * cache or free. The scan will stop when the target is reached or 671 * we have scanned the entire inactive queue. Note that m->act_count 672 * is not used to form decisions for the inactive queue, only for the 673 * active queue. 674 * 675 * maxlaunder limits the number of dirty pages we flush per scan. 676 * For most systems a smaller value (16 or 32) is more robust under 677 * extreme memory and disk pressure because any unnecessary writes 678 * to disk can result in extreme performance degredation. 
However, 679 * systems with excessive dirty pages (especially when MAP_NOSYNC is 680 * used) will die horribly with limited laundering. If the pageout 681 * daemon cannot clean enough pages in the first pass, we let it go 682 * all out in succeeding passes. 683 */ 684 685 if ((maxlaunder = vm_max_launder) <= 1) 686 maxlaunder = 1; 687 if (pass) 688 maxlaunder = 10000; 689 690rescan0: 691 addl_page_shortage = addl_page_shortage_init; 692 maxscan = cnt.v_inactive_count; 693 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl); 694 m != NULL && maxscan-- > 0 && page_shortage > 0; 695 m = next) { 696 697 cnt.v_pdpages++; 698 699 if (m->queue != PQ_INACTIVE) { 700 goto rescan0; 701 } 702 703 next = TAILQ_NEXT(m, pageq); 704 705 /* 706 * skip marker pages 707 */ 708 if (m->flags & PG_MARKER) 709 continue; 710 711 if (m->hold_count) { 712 s = splvm(); 713 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 714 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 715 splx(s); 716 addl_page_shortage++; 717 continue; 718 } 719 /* 720 * Dont mess with busy pages, keep in the front of the 721 * queue, most likely are being paged out. 722 */ 723 if (m->busy || (m->flags & PG_BUSY)) { 724 addl_page_shortage++; 725 continue; 726 } 727 728 /* 729 * If the object is not being used, we ignore previous 730 * references. 731 */ 732 if (m->object->ref_count == 0) { 733 vm_page_flag_clear(m, PG_REFERENCED); 734 pmap_clear_reference(m); 735 736 /* 737 * Otherwise, if the page has been referenced while in the 738 * inactive queue, we bump the "activation count" upwards, 739 * making it less likely that the page will be added back to 740 * the inactive queue prematurely again. Here we check the 741 * page tables (or emulated bits, if any), given the upper 742 * level VM system not knowing anything about existing 743 * references. 744 */ 745 } else if (((m->flags & PG_REFERENCED) == 0) && 746 (actcount = pmap_ts_referenced(m))) { 747 vm_page_activate(m); 748 m->act_count += (actcount + ACT_ADVANCE); 749 continue; 750 } 751 752 /* 753 * If the upper level VM system knows about any page 754 * references, we activate the page. We also set the 755 * "activation count" higher than normal so that we will less 756 * likely place pages back onto the inactive queue again. 757 */ 758 if ((m->flags & PG_REFERENCED) != 0) { 759 vm_page_flag_clear(m, PG_REFERENCED); 760 actcount = pmap_ts_referenced(m); 761 vm_page_activate(m); 762 m->act_count += (actcount + ACT_ADVANCE + 1); 763 continue; 764 } 765 766 /* 767 * If the upper level VM system doesn't know anything about 768 * the page being dirty, we have to check for it again. As 769 * far as the VM code knows, any partially dirty pages are 770 * fully dirty. 771 */ 772 if (m->dirty == 0) { 773 vm_page_test_dirty(m); 774 } else { 775 vm_page_dirty(m); 776 } 777 778 /* 779 * Invalid pages can be easily freed 780 */ 781 if (m->valid == 0) { 782 vm_pageout_page_free(m); 783 cnt.v_dfree++; 784 --page_shortage; 785 786 /* 787 * Clean pages can be placed onto the cache queue. This 788 * effectively frees them. 789 */ 790 } else if (m->dirty == 0) { 791 vm_page_cache(m); 792 --page_shortage; 793 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) { 794 /* 795 * Dirty pages need to be paged out, but flushing 796 * a page is extremely expensive verses freeing 797 * a clean page. 
Rather then artificially limiting 798 * the number of pages we can flush, we instead give 799 * dirty pages extra priority on the inactive queue 800 * by forcing them to be cycled through the queue 801 * twice before being flushed, after which the 802 * (now clean) page will cycle through once more 803 * before being freed. This significantly extends 804 * the thrash point for a heavily loaded machine. 805 */ 806 s = splvm(); 807 vm_page_flag_set(m, PG_WINATCFLS); 808 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 809 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 810 splx(s); 811 } else if (maxlaunder > 0) { 812 /* 813 * We always want to try to flush some dirty pages if 814 * we encounter them, to keep the system stable. 815 * Normally this number is small, but under extreme 816 * pressure where there are insufficient clean pages 817 * on the inactive queue, we may have to go all out. 818 */ 819 int swap_pageouts_ok; 820 struct vnode *vp = NULL; 821 struct mount *mp; 822 823 object = m->object; 824 825 if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) { 826 swap_pageouts_ok = 1; 827 } else { 828 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts); 829 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts && 830 vm_page_count_min()); 831 832 } 833 834 /* 835 * We don't bother paging objects that are "dead". 836 * Those objects are in a "rundown" state. 837 */ 838 if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) { 839 s = splvm(); 840 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 841 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 842 splx(s); 843 continue; 844 } 845 846 /* 847 * The object is already known NOT to be dead. It 848 * is possible for the vget() to block the whole 849 * pageout daemon, but the new low-memory handling 850 * code should prevent it. 851 * 852 * The previous code skipped locked vnodes and, worse, 853 * reordered pages in the queue. This results in 854 * completely non-deterministic operation and, on a 855 * busy system, can lead to extremely non-optimal 856 * pageouts. For example, it can cause clean pages 857 * to be freed and dirty pages to be moved to the end 858 * of the queue. Since dirty pages are also moved to 859 * the end of the queue once-cleaned, this gives 860 * way too large a weighting to defering the freeing 861 * of dirty pages. 862 * 863 * XXX we need to be able to apply a timeout to the 864 * vget() lock attempt. 865 */ 866 867 if (object->type == OBJT_VNODE) { 868 vp = object->handle; 869 870 mp = NULL; 871 if (vp->v_type == VREG) 872 vn_start_write(vp, &mp, V_NOWAIT); 873 if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) { 874 vn_finished_write(mp); 875 if (object->flags & OBJ_MIGHTBEDIRTY) 876 vnodes_skipped++; 877 continue; 878 } 879 880 /* 881 * The page might have been moved to another 882 * queue during potential blocking in vget() 883 * above. The page might have been freed and 884 * reused for another vnode. The object might 885 * have been reused for another vnode. 886 */ 887 if (m->queue != PQ_INACTIVE || 888 m->object != object || 889 object->handle != vp) { 890 if (object->flags & OBJ_MIGHTBEDIRTY) 891 vnodes_skipped++; 892 vput(vp); 893 vn_finished_write(mp); 894 continue; 895 } 896 897 /* 898 * The page may have been busied during the 899 * blocking in vput(); We don't move the 900 * page back onto the end of the queue so that 901 * statistics are more correct if we don't. 
902 */ 903 if (m->busy || (m->flags & PG_BUSY)) { 904 vput(vp); 905 vn_finished_write(mp); 906 continue; 907 } 908 909 /* 910 * If the page has become held, then skip it 911 */ 912 if (m->hold_count) { 913 s = splvm(); 914 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 915 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); 916 splx(s); 917 if (object->flags & OBJ_MIGHTBEDIRTY) 918 vnodes_skipped++; 919 vput(vp); 920 vn_finished_write(mp); 921 continue; 922 } 923 } 924 925 /* 926 * If a page is dirty, then it is either being washed 927 * (but not yet cleaned) or it is still in the 928 * laundry. If it is still in the laundry, then we 929 * start the cleaning operation. 930 * 931 * This operation may cluster, invalidating the 'next' 932 * pointer. To prevent an inordinate number of 933 * restarts we use our marker to remember our place. 934 * 935 * decrement page_shortage on success to account for 936 * the (future) cleaned page. Otherwise we could wind 937 * up laundering or cleaning too many pages. 938 */ 939 s = splvm(); 940 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq); 941 splx(s); 942 if (vm_pageout_clean(m) != 0) { 943 --page_shortage; 944 --maxlaunder; 945 } 946 s = splvm(); 947 next = TAILQ_NEXT(&marker, pageq); 948 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq); 949 splx(s); 950 if (vp) { 951 vput(vp); 952 vn_finished_write(mp); 953 } 954 } 955 } 956 957 /* 958 * Compute the number of pages we want to try to move from the 959 * active queue to the inactive queue. 960 */ 961 page_shortage = vm_paging_target() + 962 cnt.v_inactive_target - cnt.v_inactive_count; 963 page_shortage += addl_page_shortage; 964 965 /* 966 * Scan the active queue for things we can deactivate. We nominally 967 * track the per-page activity counter and use it to locate 968 * deactivation candidates. 969 */ 970 971 pcount = cnt.v_active_count; 972 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); 973 974 while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) { 975 976 /* 977 * This is a consistency check, and should likely be a panic 978 * or warning. 979 */ 980 if (m->queue != PQ_ACTIVE) { 981 break; 982 } 983 984 next = TAILQ_NEXT(m, pageq); 985 /* 986 * Don't deactivate pages that are busy. 987 */ 988 if ((m->busy != 0) || 989 (m->flags & PG_BUSY) || 990 (m->hold_count != 0)) { 991 s = splvm(); 992 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 993 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 994 splx(s); 995 m = next; 996 continue; 997 } 998 999 /* 1000 * The count for pagedaemon pages is done after checking the 1001 * page for eligibility... 1002 */ 1003 cnt.v_pdpages++; 1004 1005 /* 1006 * Check to see "how much" the page has been used. 1007 */ 1008 actcount = 0; 1009 if (m->object->ref_count != 0) { 1010 if (m->flags & PG_REFERENCED) { 1011 actcount += 1; 1012 } 1013 actcount += pmap_ts_referenced(m); 1014 if (actcount) { 1015 m->act_count += ACT_ADVANCE + actcount; 1016 if (m->act_count > ACT_MAX) 1017 m->act_count = ACT_MAX; 1018 } 1019 } 1020 1021 /* 1022 * Since we have "tested" this bit, we need to clear it now. 1023 */ 1024 vm_page_flag_clear(m, PG_REFERENCED); 1025 1026 /* 1027 * Only if an object is currently being used, do we use the 1028 * page activation count stats. 
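 *
 * As a rough illustration, assuming the usual vm_page.h values of
 * ACT_ADVANCE = 3, ACT_DECLINE = 1 and ACT_MAX = 64: a page that is
 * referenced on every scan gains ACT_ADVANCE plus its observed
 * reference count per pass and saturates at ACT_MAX, while a page
 * that stops being referenced loses ACT_DECLINE per pass, so it can
 * take on the order of 64 passes before act_count reaches zero and
 * the page becomes a deactivation candidate.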
1029 */ 1030 if (actcount && (m->object->ref_count != 0)) { 1031 s = splvm(); 1032 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1033 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1034 splx(s); 1035 } else { 1036 m->act_count -= min(m->act_count, ACT_DECLINE); 1037 if (vm_pageout_algorithm || 1038 m->object->ref_count == 0 || 1039 m->act_count == 0) { 1040 page_shortage--; 1041 if (m->object->ref_count == 0) { 1042 vm_page_protect(m, VM_PROT_NONE); 1043 if (m->dirty == 0) 1044 vm_page_cache(m); 1045 else 1046 vm_page_deactivate(m); 1047 } else { 1048 vm_page_deactivate(m); 1049 } 1050 } else { 1051 s = splvm(); 1052 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1053 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1054 splx(s); 1055 } 1056 } 1057 m = next; 1058 } 1059 1060 s = splvm(); 1061 1062 /* 1063 * We try to maintain some *really* free pages, this allows interrupt 1064 * code to be guaranteed space. Since both cache and free queues 1065 * are considered basically 'free', moving pages from cache to free 1066 * does not effect other calculations. 1067 */ 1068 1069 while (cnt.v_free_count < cnt.v_free_reserved) { 1070 static int cache_rover = 0; 1071 m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE); 1072 if (!m) 1073 break; 1074 if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || 1075 m->busy || 1076 m->hold_count || 1077 m->wire_count) { 1078#ifdef INVARIANTS 1079 printf("Warning: busy page %p found in cache\n", m); 1080#endif 1081 vm_page_deactivate(m); 1082 continue; 1083 } 1084 cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK; 1085 vm_pageout_page_free(m); 1086 cnt.v_dfree++; 1087 } 1088 splx(s); 1089 1090#if !defined(NO_SWAPPING) 1091 /* 1092 * Idle process swapout -- run once per second. 1093 */ 1094 if (vm_swap_idle_enabled) { 1095 static long lsec; 1096 if (time_second != lsec) { 1097 vm_pageout_req_swapout |= VM_SWAP_IDLE; 1098 vm_req_vmdaemon(); 1099 lsec = time_second; 1100 } 1101 } 1102#endif 1103 1104 /* 1105 * If we didn't get enough free pages, and we have skipped a vnode 1106 * in a writeable object, wakeup the sync daemon. And kick swapout 1107 * if we did not get enough free pages. 1108 */ 1109 if (vm_paging_target() > 0) { 1110 if (vnodes_skipped && vm_page_count_min()) 1111 (void) speedup_syncer(); 1112#if !defined(NO_SWAPPING) 1113 if (vm_swap_enabled && vm_page_count_target()) { 1114 vm_req_vmdaemon(); 1115 vm_pageout_req_swapout |= VM_SWAP_NORMAL; 1116 } 1117#endif 1118 } 1119 1120 /* 1121 * make sure that we have swap space -- if we are low on memory and 1122 * swap -- then kill the biggest process. 1123 */ 1124 if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) { 1125 bigproc = NULL; 1126 bigsize = 0; 1127 sx_slock(&allproc_lock); 1128 LIST_FOREACH(p, &allproc, p_list) { 1129 /* 1130 * if this is a system process, skip it 1131 */ 1132 PROC_LOCK(p); 1133 if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) || 1134 (p->p_pid == 1) || 1135 ((p->p_pid < 48) && (vm_swap_size != 0))) { 1136 PROC_UNLOCK(p); 1137 continue; 1138 } 1139 PROC_UNLOCK(p); 1140 /* 1141 * if the process is in a non-running type state, 1142 * don't touch it. 1143 */ 1144 mtx_lock_spin(&sched_lock); 1145 if (p->p_stat != SRUN && p->p_stat != SSLEEP) { 1146 mtx_unlock_spin(&sched_lock); 1147 continue; 1148 } 1149 mtx_unlock_spin(&sched_lock); 1150 /* 1151 * get the process size 1152 */ 1153 size = vmspace_resident_count(p->p_vmspace); 1154 /* 1155 * if the this process is bigger than the biggest one 1156 * remember it. 
1157 */ 1158 if (size > bigsize) { 1159 bigproc = p; 1160 bigsize = size; 1161 } 1162 } 1163 sx_sunlock(&allproc_lock); 1164 if (bigproc != NULL) { 1165 killproc(bigproc, "out of swap space"); 1166 mtx_lock_spin(&sched_lock); 1167 bigproc->p_estcpu = 0; 1168 bigproc->p_nice = PRIO_MIN; 1169 resetpriority(bigproc); 1170 mtx_unlock_spin(&sched_lock); 1171 wakeup(&cnt.v_free_count); 1172 } 1173 } 1174} 1175 1176/* 1177 * This routine tries to maintain the pseudo LRU active queue, 1178 * so that during long periods of time where there is no paging, 1179 * that some statistic accumulation still occurs. This code 1180 * helps the situation where paging just starts to occur. 1181 */ 1182static void 1183vm_pageout_page_stats() 1184{ 1185 int s; 1186 vm_page_t m,next; 1187 int pcount,tpcount; /* Number of pages to check */ 1188 static int fullintervalcount = 0; 1189 int page_shortage; 1190 int s0; 1191 1192 page_shortage = 1193 (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) - 1194 (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count); 1195 1196 if (page_shortage <= 0) 1197 return; 1198 1199 s0 = splvm(); 1200 1201 pcount = cnt.v_active_count; 1202 fullintervalcount += vm_pageout_stats_interval; 1203 if (fullintervalcount < vm_pageout_full_stats_interval) { 1204 tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count; 1205 if (pcount > tpcount) 1206 pcount = tpcount; 1207 } else { 1208 fullintervalcount = 0; 1209 } 1210 1211 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); 1212 while ((m != NULL) && (pcount-- > 0)) { 1213 int actcount; 1214 1215 if (m->queue != PQ_ACTIVE) { 1216 break; 1217 } 1218 1219 next = TAILQ_NEXT(m, pageq); 1220 /* 1221 * Don't deactivate pages that are busy. 1222 */ 1223 if ((m->busy != 0) || 1224 (m->flags & PG_BUSY) || 1225 (m->hold_count != 0)) { 1226 s = splvm(); 1227 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1228 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1229 splx(s); 1230 m = next; 1231 continue; 1232 } 1233 1234 actcount = 0; 1235 if (m->flags & PG_REFERENCED) { 1236 vm_page_flag_clear(m, PG_REFERENCED); 1237 actcount += 1; 1238 } 1239 1240 actcount += pmap_ts_referenced(m); 1241 if (actcount) { 1242 m->act_count += ACT_ADVANCE + actcount; 1243 if (m->act_count > ACT_MAX) 1244 m->act_count = ACT_MAX; 1245 s = splvm(); 1246 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1247 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1248 splx(s); 1249 } else { 1250 if (m->act_count == 0) { 1251 /* 1252 * We turn off page access, so that we have 1253 * more accurate RSS stats. We don't do this 1254 * in the normal page deactivation when the 1255 * system is loaded VM wise, because the 1256 * cost of the large number of page protect 1257 * operations would be higher than the value 1258 * of doing the operation. 1259 */ 1260 vm_page_protect(m, VM_PROT_NONE); 1261 vm_page_deactivate(m); 1262 } else { 1263 m->act_count -= min(m->act_count, ACT_DECLINE); 1264 s = splvm(); 1265 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1266 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq); 1267 splx(s); 1268 } 1269 } 1270 1271 m = next; 1272 } 1273 splx(s0); 1274} 1275 1276static int 1277vm_pageout_free_page_calc(count) 1278vm_size_t count; 1279{ 1280 if (count < cnt.v_page_count) 1281 return 0; 1282 /* 1283 * free_reserved needs to include enough for the largest swap pager 1284 * structures plus enough for any pv_entry structs when paging. 
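 *
 * For example, on a machine with 32768 4K pages (128MB), the code
 * below starts v_free_min at 4 + (32768 - 1024) / 200 = 162 pages
 * (roughly 650KB); v_free_reserved is then added on top of that.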
1285 */ 1286 if (cnt.v_page_count > 1024) 1287 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200; 1288 else 1289 cnt.v_free_min = 4; 1290 cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE + 1291 cnt.v_interrupt_free_min; 1292 cnt.v_free_reserved = vm_pageout_page_count + 1293 cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE; 1294 cnt.v_free_severe = cnt.v_free_min / 2; 1295 cnt.v_free_min += cnt.v_free_reserved; 1296 cnt.v_free_severe += cnt.v_free_reserved; 1297 return 1; 1298} 1299 1300 1301/* 1302 * vm_pageout is the high level pageout daemon. 1303 */ 1304static void 1305vm_pageout() 1306{ 1307 int pass; 1308 1309 mtx_lock(&Giant); 1310 1311 /* 1312 * Initialize some paging parameters. 1313 */ 1314 1315 cnt.v_interrupt_free_min = 2; 1316 if (cnt.v_page_count < 2000) 1317 vm_pageout_page_count = 8; 1318 1319 vm_pageout_free_page_calc(cnt.v_page_count); 1320 /* 1321 * v_free_target and v_cache_min control pageout hysteresis. Note 1322 * that these are more a measure of the VM cache queue hysteresis 1323 * then the VM free queue. Specifically, v_free_target is the 1324 * high water mark (free+cache pages). 1325 * 1326 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the 1327 * low water mark, while v_free_min is the stop. v_cache_min must 1328 * be big enough to handle memory needs while the pageout daemon 1329 * is signalled and run to free more pages. 1330 */ 1331 if (cnt.v_free_count > 6144) 1332 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved; 1333 else 1334 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved; 1335 1336 if (cnt.v_free_count > 2048) { 1337 cnt.v_cache_min = cnt.v_free_target; 1338 cnt.v_cache_max = 2 * cnt.v_cache_min; 1339 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2; 1340 } else { 1341 cnt.v_cache_min = 0; 1342 cnt.v_cache_max = 0; 1343 cnt.v_inactive_target = cnt.v_free_count / 4; 1344 } 1345 if (cnt.v_inactive_target > cnt.v_free_count / 3) 1346 cnt.v_inactive_target = cnt.v_free_count / 3; 1347 1348 /* XXX does not really belong here */ 1349 if (vm_page_max_wired == 0) 1350 vm_page_max_wired = cnt.v_free_count / 3; 1351 1352 if (vm_pageout_stats_max == 0) 1353 vm_pageout_stats_max = cnt.v_free_target; 1354 1355 /* 1356 * Set interval in seconds for stats scan. 1357 */ 1358 if (vm_pageout_stats_interval == 0) 1359 vm_pageout_stats_interval = 5; 1360 if (vm_pageout_full_stats_interval == 0) 1361 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4; 1362 1363 1364 /* 1365 * Set maximum free per pass 1366 */ 1367 if (vm_pageout_stats_free_max == 0) 1368 vm_pageout_stats_free_max = 5; 1369 1370 curproc->p_flag |= P_BUFEXHAUST; 1371 swap_pager_swap_init(); 1372 pass = 0; 1373 /* 1374 * The pageout daemon is never done, so loop forever. 1375 */ 1376 while (TRUE) { 1377 int error; 1378 int s = splvm(); 1379 1380 /* 1381 * If we have enough free memory, wakeup waiters. Do 1382 * not clear vm_pages_needed until we reach our target, 1383 * otherwise we may be woken up over and over again and 1384 * waste a lot of cpu. 1385 */ 1386 if (vm_pages_needed && !vm_page_count_min()) { 1387 if (vm_paging_needed() <= 0) 1388 vm_pages_needed = 0; 1389 wakeup(&cnt.v_free_count); 1390 } 1391 if (vm_pages_needed) { 1392 /* 1393 * Still not done, take a second pass without waiting 1394 * (unlimited dirty cleaning), otherwise sleep a bit 1395 * and try again. 1396 */ 1397 ++pass; 1398 if (pass > 1) 1399 tsleep(&vm_pages_needed, PVM, "psleep", hz/2); 1400 } else { 1401 /* 1402 * Good enough, sleep & handle stats. 
Prime the pass 1403 * for the next run. 1404 */ 1405 if (pass > 1) 1406 pass = 1; 1407 else 1408 pass = 0; 1409 error = tsleep(&vm_pages_needed, 1410 PVM, "psleep", vm_pageout_stats_interval * hz); 1411 if (error && !vm_pages_needed) { 1412 splx(s); 1413 pass = 0; 1414 vm_pageout_page_stats(); 1415 continue; 1416 } 1417 } 1418 1419 if (vm_pages_needed) 1420 cnt.v_pdwakeups++; 1421 splx(s); 1422 vm_pageout_scan(pass); 1423 vm_pageout_deficit = 0; 1424 } 1425} 1426 1427void 1428pagedaemon_wakeup() 1429{ 1430 if (!vm_pages_needed && curproc != pageproc) { 1431 vm_pages_needed++; 1432 wakeup(&vm_pages_needed); 1433 } 1434} 1435 1436#if !defined(NO_SWAPPING) 1437static void 1438vm_req_vmdaemon() 1439{ 1440 static int lastrun = 0; 1441 1442 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) { 1443 wakeup(&vm_daemon_needed); 1444 lastrun = ticks; 1445 } 1446} 1447 1448static void 1449vm_daemon() 1450{ 1451 struct proc *p; 1452 1453 mtx_lock(&Giant); 1454 1455 while (TRUE) { 1456 tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0); 1457 if (vm_pageout_req_swapout) { 1458 swapout_procs(vm_pageout_req_swapout); 1459 vm_pageout_req_swapout = 0; 1460 } 1461 /* 1462 * scan the processes for exceeding their rlimits or if 1463 * process is swapped out -- deactivate pages 1464 */ 1465 1466 sx_slock(&allproc_lock); 1467 LIST_FOREACH(p, &allproc, p_list) { 1468 vm_pindex_t limit, size; 1469 1470 /* 1471 * if this is a system process or if we have already 1472 * looked at this process, skip it. 1473 */ 1474 if (p->p_flag & (P_SYSTEM | P_WEXIT)) { 1475 continue; 1476 } 1477 /* 1478 * if the process is in a non-running type state, 1479 * don't touch it. 1480 */ 1481 mtx_lock_spin(&sched_lock); 1482 if (p->p_stat != SRUN && p->p_stat != SSLEEP) { 1483 mtx_unlock_spin(&sched_lock); 1484 continue; 1485 } 1486 /* 1487 * get a limit 1488 */ 1489 limit = OFF_TO_IDX( 1490 qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur, 1491 p->p_rlimit[RLIMIT_RSS].rlim_max)); 1492 1493 /* 1494 * let processes that are swapped out really be 1495 * swapped out set the limit to nothing (will force a 1496 * swap-out.) 1497 */ 1498 if ((p->p_sflag & PS_INMEM) == 0) 1499 limit = 0; /* XXX */ 1500 mtx_unlock_spin(&sched_lock); 1501 1502 size = vmspace_resident_count(p->p_vmspace); 1503 if (limit >= 0 && size >= limit) { 1504 vm_pageout_map_deactivate_pages( 1505 &p->p_vmspace->vm_map, limit); 1506 } 1507 } 1508 sx_sunlock(&allproc_lock); 1509 } 1510} 1511#endif 1512
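/*
 * Illustrative sketch -- not part of revision 74927.  It approximates the
 * consumer side of the wakeup protocol used above: an allocator that runs
 * short of free pages pokes the daemon through vm_pages_needed and then
 * sleeps on cnt.v_free_count, which the daemon's wakeup(&cnt.v_free_count)
 * releases once the paging target has been met.  The helper name, the
 * sleep priorities and the wmesg strings below are assumptions, not the
 * real vm_wait() implementation.
 */
static void
vm_wait_sketch(void)
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		/*
		 * The daemon must not wait for itself; it sleeps on the
		 * separate vm_pageout_pages_needed flag, which is
		 * typically woken from the page-free path once
		 * v_pageout_free_min pages are available again.
		 */
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
	} else {
		/* Wake the daemon once, then wait for free pages. */
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}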