vm_pageout.c revision 79242
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 79242 2001-07-04 19:00:13Z dillon $
 */

/*
 * The proverbial page-out daemon.
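 *
 * (Editorial orientation note, inferred from the code below: the daemon
 * wakes when free+cache pages fall below target, moves pages between the
 * active, inactive, cache, and free queues, and launders dirty pages via
 * the pagers; see vm_pageout_scan() for the core loop.)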
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static void vm_pageout_scan __P((int pass));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO,
	swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however, the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	GIANT_REQUIRED;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout
	 * daemon can really fragment the underlying file
	 * due to flushing pages out of order and not trying to
	 * align the clusters (which leaves sporadic out-of-order
	 * holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a
	 * forward scan if room remains.
	 */

more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
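		 *
		 * (Editorial worked example: with vm_pageout_page_count = 16
		 * and pindex = 37, the reverse scan collects pages 36 down
		 * to 32 and stops after taking 32, since 32 % 16 == 0; the
		 * forward scan may then extend the cluster toward page 47.)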
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||
		    p->hold_count != 0) {
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather than in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	GIANT_REQUIRED;
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */

	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
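		 *
		 * (Editorial note: for asynchronous VM_PAGER_PEND writes the
		 * pager's completion path is expected to perform the pip
		 * wakeup and vm_page_io_finish() once the I/O is done.)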
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	GIANT_REQUIRED;
	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
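	 *
	 * (Editorial rationale: deactivating the largest unshared object
	 * first recovers the most resident pages per object walk before
	 * falling back to the admittedly unfair per-entry scan below.)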
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			    (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	GIANT_REQUIRED;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;

	GIANT_REQUIRED;
	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.
	 * However, systems with excessive dirty pages (especially when
	 * MAP_NOSYNC is used) will die horribly with limited laundering.
	 * If the pageout daemon cannot clean enough pages in the first
	 * pass, we let it go all out in succeeding passes.
	 */

	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.
			 * Rather than artificially limiting the number of
			 * pages we can flush, we instead give dirty pages
			 * extra priority on the inactive queue by forcing
			 * them to be cycled through the queue twice before
			 * being flushed, after which the (now clean) page
			 * will cycle through once more before being freed.
			 * This significantly extends the thrash point for a
			 * heavily loaded machine.
			 */
			s = splvm();
			vm_page_flag_set(m, PG_WINATCFLS);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
			splx(s);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once-cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * XXX we need to be able to apply a timeout to the
			 * vget() lock attempt.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied while we were
				 * blocked in vget().  We don't move the page
				 * back onto the end of the queue, so that the
				 * statistics are more correct.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
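		 *
		 * (Editorial note: act_count rises by ACT_ADVANCE plus the
		 * pmap reference count on each referenced scan, capped at
		 * ACT_MAX, and falls by ACT_DECLINE per unreferenced scan,
		 * so a once-busy page must sit idle for several scans
		 * before it becomes a deactivation candidate.)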
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			/*
			 * If this process is already locked, skip it.
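			 *
			 * (Editorial note: PROC_TRYLOCK() returns 0 when the
			 * lock is already held, so a contended process is
			 * passed over rather than slept on.)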
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_lock > 0) ||
			    (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace) +
			    vmspace_swap_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistic accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.
				 * We don't do this in the normal page
				 * deactivation when the system is loaded VM
				 * wise, because the cost of the large number
				 * of page protect operations would be higher
				 * than the value of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}


/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than of the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
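	 *
	 * (Editorial worked example: with the defaults below, a partial
	 * stats scan runs every 5 seconds and a full scan every 20.  And
	 * for a machine with 32768 4K pages (128MB), v_free_min above
	 * works out to 4 + (32768 - 1024) / 200 = 162 pages, before the
	 * v_free_reserved addition.)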
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	PROC_LOCK(curproc);
	curproc->p_flag |= P_BUFEXHAUST;
	PROC_UNLOCK(curproc);
	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wakeup waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				tsleep(&vm_pages_needed, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = tsleep(&vm_pages_needed, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
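			 *
			 * (Editorial note: with limit 0, the size >= limit
			 * test below always fires, and desired == 0 makes
			 * vm_pageout_map_deactivate_pages() call
			 * pmap_remove() over the whole user address range,
			 * freeing the page table pages as well.)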
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_unlock_spin(&sched_lock);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif