vm_pageout.c revision 100740
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_pageout.c 100740 2002-07-27 06:41:03Z alc $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <machine/mutex.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int vm_swap_size;
static int vm_max_launder = 32;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, max_launder,
	CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW,
	&vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

static int pageout_lock_miss;
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */
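
/*
 * Editorial note (not part of the original file): the knobs above are
 * exported under the "vm" sysctl tree, so any of them marked CTLFLAG_RW
 * can be inspected and tuned at runtime with sysctl(8), e.g.:
 *
 *	sysctl vm.max_launder		# show the per-scan dirty flush limit
 *	sysctl vm.max_launder=64	# raise it
 */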

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t(vm_map_t, vm_object_t, vm_pindex_t, int);
static void vm_pageout_map_deactivate_pages(vm_map_t, vm_pindex_t);
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon(void);
#endif
static void vm_pageout_page_stats(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */
static int
vm_pageout_clean(m)
	vm_page_t m;
{
	vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int ib, is, page_base;
	vm_pindex_t pindex = m->pindex;

	GIANT_REQUIRED;

	object = m->object;

	/*
	 * It doesn't cost us anything to page out OBJT_DEFAULT or OBJT_SWAP
	 * objects with the new swapper, but we could have serious problems
	 * paging out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

	/*
	 * Don't mess with the page if it's busy, held, or special.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) {
		return 0;
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	ib = 1;
	is = 1;

	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if the page is NOT clean, wired, busy, held,
	 * or mapped into a buffer, and one of the following is true:
	 *	1) The page is inactive, or a seldom used active page.
	 * -or-
	 *	2) we force the issue.
	 *
	 * During heavy mmap/modification loads the pageout daemon can really
	 * fragment the underlying file due to flushing pages out of order
	 * and not trying to align the clusters (which leaves sporadic
	 * out-of-order holes).  To solve this problem we do the reverse scan
	 * first and attempt to align our cluster, then do a forward scan if
	 * room remains.
	 */
more:
	while (ib && pageout_count < vm_pageout_page_count) {
		vm_page_t p;

		if (ib > pindex) {
			ib = 0;
			break;
		}

		if ((p = vm_page_lookup(object, pindex - ib)) == NULL) {
			ib = 0;
			break;
		}
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			ib = 0;
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			ib = 0;
			break;
		}
		mc[--page_base] = p;
		++pageout_count;
		++ib;
		/*
		 * alignment boundary, stop here and switch directions.  Do
		 * not clear ib.
		 */
		if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
			break;
	}

	while (pageout_count < vm_pageout_page_count &&
	    pindex + is < object->size) {
		vm_page_t p;

		if ((p = vm_page_lookup(object, pindex + is)) == NULL)
			break;
		if (((p->queue - p->pc) == PQ_CACHE) ||
		    (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) {
			break;
		}
		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0 ||
		    p->queue != PQ_INACTIVE ||
		    p->wire_count != 0 ||	/* may be held by buf cache */
		    p->hold_count != 0) {	/* may be undergoing I/O */
			break;
		}
		mc[page_base + pageout_count] = p;
		++pageout_count;
		++is;
	}

	/*
	 * If we exhausted our forward scan, continue with the reverse scan
	 * when possible, even past a page boundary.  This catches boundary
	 * conditions.
	 */
	if (ib && pageout_count < vm_pageout_page_count)
		goto more;

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}
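
/*
 * Illustrative sketch (not part of the original file): with the default
 * vm_pageout_page_count of 16, mc[] has 32 slots and the target page is
 * seeded at mc[16].  The reverse scan fills mc[15], mc[14], ... while the
 * forward scan fills mc[17], mc[18], ..., so the cluster handed to
 * vm_pageout_flush() is always the contiguous slice
 * mc[page_base .. page_base + pageout_count - 1].
 */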

/*
 * vm_pageout_flush() - launder the given pages
 *
 * The given pages are laundered.  Note that we set up for the start of
 * I/O (i.e. busy the page), mark it read-only, and bump the object
 * reference count all in here rather than in the parent.  If we want
 * the parent to do more sophisticated things we may have to change
 * the ordering.
 */
int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	GIANT_REQUIRED;
	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fix up the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 *
	 * NOTE! mc[i]->dirty may be partial or fragmented due to an
	 * edge case with file fragments.
	 */
	for (i = 0; i < count; i++) {
		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
		    ("vm_pageout_flush page %p index %d/%d: partially invalid page",
		    mc[i], i, count));
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
				vm_page_protect(mt, VM_PROT_READ);
		}
	}
	vm_page_unlock_queues();
	return numpagedout;
}
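
#if 0
/*
 * Illustrative sketch, not part of the original file: the minimal
 * single-page use of vm_pageout_flush().  The caller must hold Giant,
 * and "m" must be a fully valid, unbusied, unheld dirty page -- exactly
 * what vm_pageout_clean() guarantees before building its cluster.
 */
static int
vm_pageout_flush_one(vm_page_t m)
{
	vm_page_t mc[1];

	mc[0] = m;
	return vm_pageout_flush(mc, 1, 0);	/* async, reads allowed */
}
#endif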

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * Deactivate enough pages to satisfy the inactive target requirements;
 * or, if vm_page_proc_limit is set, deactivate all of the pages in the
 * object and its backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	vm_page_t p, next;
	int actcount, rcount, remove_mode;

	GIANT_REQUIRED;
	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		vm_page_lock_queues();
		while (p && (rcount-- > 0)) {
			if (pmap_resident_count(map->pmap) <= desired) {
				vm_page_unlock_queues();
				return;
			}
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & (PG_BUSY|PG_UNMANAGED)) ||
			    !pmap_page_exists_quick(vm_map_pmap(map), p)) {
				p = next;
				continue;
			}
			actcount = pmap_ts_referenced(p);
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}
			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						vm_pageq_requeue(p);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					vm_pageq_requeue(p);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		vm_page_unlock_queues();
		object = object->backing_object;
	}
}

/*
 * deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	GIANT_REQUIRED;
	if (!vm_map_trylock(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			    (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif /* !defined(NO_SWAPPING) */

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */
void
vm_pageout_page_free(vm_page_t m)
{
	vm_object_t object = m->object;
	int type = object->type;

	GIANT_REQUIRED;
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static void
vm_pageout_scan(int pass)
{
	vm_page_t m, next;
	struct vm_page marker;
	int save_page_shortage;
	int save_inactive_count;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int actcount;
	int vnodes_skipped = 0;
	int maxlaunder;
	int s;
	struct thread *td;

	GIANT_REQUIRED;
	/*
	 * Do whatever cleanup the pmap code can.
	 */
	pmap_collect();
	uma_reclaim();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;
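
	/*
	 * Editorial note (assumption, not in the original file):
	 * vm_paging_target(), used below, comes from vm_pageout.h and is
	 * essentially
	 *
	 *	(v_free_target + v_cache_min) - (v_free_count + v_cache_count)
	 *
	 * i.e. how far the free+cache pool currently is below its target.
	 */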

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */
	page_shortage = vm_paging_target() + addl_page_shortage_init;
	save_page_shortage = page_shortage;
	save_inactive_count = cnt.v_inactive_count;

	/*
	 * Initialize our marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = PQ_INACTIVE;
	marker.wire_count = 1;

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.  Note that m->act_count
	 * is not used to form decisions for the inactive queue, only for the
	 * active queue.
	 *
	 * maxlaunder limits the number of dirty pages we flush per scan.
	 * For most systems a smaller value (16 or 32) is more robust under
	 * extreme memory and disk pressure because any unnecessary writes
	 * to disk can result in extreme performance degradation.  However,
	 * systems with excessive dirty pages (especially when MAP_NOSYNC is
	 * used) will die horribly with limited laundering.  If the pageout
	 * daemon cannot clean enough pages in the first pass, we let it go
	 * all out in succeeding passes.
	 */
	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;
	if (pass)
		maxlaunder = 10000;
rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	     m != NULL && maxscan-- > 0 && page_shortage > 0;
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		/*
		 * skip marker pages
		 */
		if (m->flags & PG_MARKER)
			continue;

		/*
		 * A held page may be undergoing I/O, so skip it.
		 */
		if (m->hold_count) {
			vm_pageq_requeue(m);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(m);

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), since the upper
		 * level VM system knows nothing about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we are less
		 * likely to place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.  This
		 * effectively frees them.
		 */
		} else if (m->dirty == 0) {
			vm_page_lock_queues();
			vm_page_cache(m);
			vm_page_unlock_queues();
			--page_shortage;
		} else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
			/*
			 * Dirty pages need to be paged out, but flushing
			 * a page is extremely expensive versus freeing
			 * a clean page.  Rather than artificially limiting
			 * the number of pages we can flush, we instead give
			 * dirty pages extra priority on the inactive queue
			 * by forcing them to be cycled through the queue
			 * twice before being flushed, after which the
			 * (now clean) page will cycle through once more
			 * before being freed.  This significantly extends
			 * the thrash point for a heavily loaded machine.
			 */
			vm_page_flag_set(m, PG_WINATCFLS);
			vm_pageq_requeue(m);
		} else if (maxlaunder > 0) {
			/*
			 * We always want to try to flush some dirty pages if
			 * we encounter them, to keep the system stable.
			 * Normally this number is small, but under extreme
			 * pressure where there are insufficient clean pages
			 * on the inactive queue, we may have to go all out.
			 */
			int swap_pageouts_ok;
			struct vnode *vp = NULL;
			struct mount *mp;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    vm_page_count_min());

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				vm_pageq_requeue(m);
				continue;
			}

			/*
			 * The object is already known NOT to be dead.  It
			 * is possible for the vget() to block the whole
			 * pageout daemon, but the new low-memory handling
			 * code should prevent it.
			 *
			 * The previous code skipped locked vnodes and, worse,
			 * reordered pages in the queue.  This results in
			 * completely non-deterministic operation and, on a
			 * busy system, can lead to extremely non-optimal
			 * pageouts.  For example, it can cause clean pages
			 * to be freed and dirty pages to be moved to the end
			 * of the queue.  Since dirty pages are also moved to
			 * the end of the queue once cleaned, this gives
			 * way too large a weighting to deferring the freeing
			 * of dirty pages.
			 *
			 * We can't wait forever for the vnode lock; we might
			 * deadlock due to a vn_read() getting stuck in
			 * vm_wait while holding this vnode.  We skip the
			 * vnode if we can't get it in a reasonable amount
			 * of time.
			 */
			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				mp = NULL;
				if (vp->v_type == VREG)
					vn_start_write(vp, &mp, V_NOWAIT);
				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ|LK_TIMELOCK, curthread)) {
					++pageout_lock_miss;
					vn_finished_write(mp);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.  The page might have been freed and
				 * reused for another vnode.  The object might
				 * have been reused for another vnode.
				 */
				if (m->queue != PQ_INACTIVE ||
				    m->object != object ||
				    object->handle != vp) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget() above.  We don't move
				 * the page back onto the end of the queue;
				 * the statistics are more correct if we
				 * don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					vn_finished_write(mp);
					continue;
				}

				/*
				 * If the page has become held it might
				 * be undergoing I/O, so skip it
				 */
				if (m->hold_count) {
					vm_pageq_requeue(m);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					vn_finished_write(mp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 *
			 * This operation may cluster, invalidating the 'next'
			 * pointer.  To prevent an inordinate number of
			 * restarts we use our marker to remember our place.
			 *
			 * Decrement page_shortage on success to account for
			 * the (future) cleaned page.  Otherwise we could wind
			 * up laundering or cleaning too many pages.
			 */
			s = splvm();
			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
			splx(s);
			if (vm_pageout_clean(m) != 0) {
				--page_shortage;
				--maxlaunder;
			}
			s = splvm();
			next = TAILQ_NEXT(&marker, pageq);
			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
			splx(s);
			if (vp) {
				vput(vp);
				vn_finished_write(mp);
			}
		}
	}

	/*
	 * Compute the number of pages we want to try to move from the
	 * active queue to the inactive queue.
	 */
	page_shortage = vm_paging_target() +
	    cnt.v_inactive_target - cnt.v_inactive_count;
	page_shortage += addl_page_shortage;

	vm_page_lock_queues();
	/*
	 * Scan the active queue for things we can deactivate.  We nominally
	 * track the per-page activity counter and use it to locate
	 * deactivation candidates.
	 */
	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(m);
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * We use the page activation count stats only if the object
		 * is currently being used.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			vm_pageq_requeue(m);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm ||
			    m->object->ref_count == 0 ||
			    m->act_count == 0) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				vm_pageq_requeue(m);
			}
		}
		m = next;
	}
	s = splvm();

	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
		    m->busy ||
		    m->hold_count ||
		    m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);
	vm_page_unlock_queues();
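	/*
	 * Editorial note (not part of the original file): PQ_PRIME2 is a
	 * prime stride, so advancing cache_rover by it under the power-of-two
	 * PQ_L2_MASK visits every page-color subqueue of PQ_CACHE before
	 * repeating, spreading the frees evenly across colors.
	 */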
#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wake up the sync daemon.  Also kick
	 * swapout if we did not get enough free pages.
	 */
	if (vm_paging_target() > 0) {
		if (vnodes_skipped && vm_page_count_min())
			(void) speedup_syncer();
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled && vm_page_count_target()) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * If we are out of swap and were not able to reach our paging
	 * target, kill the largest process.
	 *
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	if ((vm_swap_size < 64 && vm_page_count_min()) ||
	    (swap_pager_full && vm_paging_target() > 0)) {
#if 0
	if ((vm_swap_size < 64 || swap_pager_full) && vm_page_count_min()) {
#endif
		bigproc = NULL;
		bigsize = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			int breakout;
			/*
			 * If this process is already locked, skip it.
			 */
			if (PROC_TRYLOCK(p) == 0)
				continue;
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.  Check all the threads individually.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (td->td_state != TDS_RUNQ &&
				    td->td_state != TDS_RUNNING &&
				    td->td_state != TDS_SLP) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				continue;
			}
			mtx_unlock_spin(&sched_lock);
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace) +
			    vmspace_swap_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				if (bigproc != NULL)
					PROC_UNLOCK(bigproc);
				bigproc = p;
				bigsize = size;
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (bigproc != NULL) {
			struct ksegrp *kg;
			killproc(bigproc, "out of swap space");
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
				kg->kg_estcpu = 0;
				kg->kg_nice = PRIO_MIN;	/* XXXKSE ??? */
				resetpriority(kg);
			}
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
}
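
/*
 * Editorial note (illustrative arithmetic, not in the original file): the
 * partial scan in vm_pageout_page_stats() below checks at most
 *
 *	tpcount = (vm_pageout_stats_max * v_active_count) / v_page_count
 *
 * pages.  For example, with vm_pageout_stats_max == 4096 and 20000 active
 * pages out of 32768 total, a partial scan covers 4096 * 20000 / 32768 =
 * 2500 pages; only the less frequent full scan walks the whole active queue.
 */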

/*
 * This routine tries to maintain the pseudo LRU active queue, so that
 * statistics continue to accumulate even during long periods when there
 * is no paging.  This helps the situation when paging does begin.
 */
static void
vm_pageout_page_stats()
{
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;
	int s0;

	page_shortage =
	    (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);

	if (page_shortage <= 0)
		return;

	s0 = splvm();
	vm_page_lock_queues();
	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	} else {
		fullintervalcount = 0;
	}

	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			vm_pageq_requeue(m);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			vm_pageq_requeue(m);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the
				 * cost of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				vm_pageq_requeue(m);
			}
		}

		m = next;
	}
	vm_page_unlock_queues();
	splx(s0);
}

static int
vm_pageout_free_page_calc(count)
	vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_severe = cnt.v_free_min / 2;
	cnt.v_free_min += cnt.v_free_reserved;
	cnt.v_free_severe += cnt.v_free_reserved;
	return 1;
}
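
/*
 * Editorial note (worked example, not in the original file; assumes 4 KB
 * pages, a 64 KB MAXBSIZE, and a machine with 32768 managed pages, i.e.
 * 128 MB of RAM):
 *
 *	v_free_min (initial)	= 4 + (32768 - 1024) / 200 = 162
 *	v_pageout_free_min	= (2 * 65536) / 4096 + v_interrupt_free_min
 *				= 32 + 2 = 34
 *
 * v_free_reserved then adds vm_pageout_page_count, count / 768, and
 * PQ_L2_SIZE on top of that, and is folded back into v_free_min above.
 */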

/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	int pass;

	mtx_lock(&Giant);

	/*
	 * Initialize some paging parameters.
	 */
	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * v_free_target and v_cache_min control pageout hysteresis.  Note
	 * that these are more a measure of the VM cache queue hysteresis
	 * than the VM free queue.  Specifically, v_free_target is the
	 * high water mark (free+cache pages).
	 *
	 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
	 * low water mark, while v_free_min is the stop.  v_cache_min must
	 * be big enough to handle memory needs while the pageout daemon
	 * is signalled and run to free more pages.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;

	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	swap_pager_swap_init();
	pass = 0;
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();

		/*
		 * If we have enough free memory, wake up waiters.  Do
		 * not clear vm_pages_needed until we reach our target,
		 * otherwise we may be woken up over and over again and
		 * waste a lot of cpu.
		 */
		if (vm_pages_needed && !vm_page_count_min()) {
			if (vm_paging_needed() <= 0)
				vm_pages_needed = 0;
			wakeup(&cnt.v_free_count);
		}
		if (vm_pages_needed) {
			/*
			 * Still not done, take a second pass without waiting
			 * (unlimited dirty cleaning), otherwise sleep a bit
			 * and try again.
			 */
			++pass;
			if (pass > 1)
				tsleep(&vm_pages_needed, PVM,
				    "psleep", hz/2);
		} else {
			/*
			 * Good enough, sleep & handle stats.  Prime the pass
			 * for the next run.
			 */
			if (pass > 1)
				pass = 1;
			else
				pass = 0;
			error = tsleep(&vm_pages_needed, PVM,
			    "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				pass = 0;
				vm_pageout_page_stats();
				continue;
			}
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		splx(s);
		vm_pageout_scan(pass);
		vm_pageout_deficit = 0;
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curthread->td_proc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}
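
/*
 * Editorial note (assumption, not in the original file): pagedaemon_wakeup()
 * is the hook the rest of the VM system uses to nudge the daemon; the page
 * allocation paths call it when the free page count drops below the
 * thresholds computed above, and the daemon then wakes from the tsleep()
 * loop in vm_pageout().
 */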

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;
	int breakout;
	struct thread *td;

	mtx_lock(&Giant);
	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits, or if
		 * a process is swapped out -- deactivate pages
		 */
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			mtx_lock_spin(&sched_lock);
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				if (td->td_state != TDS_RUNQ &&
				    td->td_state != TDS_RUNNING &&
				    td->td_state != TDS_SLP) {
					breakout = 1;
					break;
				}
			}
			if (breakout) {
				mtx_unlock_spin(&sched_lock);
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing, which will
			 * force a swap-out.
			 */
			if ((p->p_sflag & PS_INMEM) == 0)
				limit = 0;	/* XXX */
			mtx_unlock_spin(&sched_lock);

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
		sx_sunlock(&allproc_lock);
	}
}
#endif /* !defined(NO_SWAPPING) */