vm_pageout.c revision 44156
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.136 1999/02/08 00:37:36 dillon Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed=0;		/* Event on which pageout daemon sleeps */
int vm_pageout_deficit=0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed=0;	/* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
static int defer_swap_pageouts=0;
static int disable_swap_pageouts=0;

static int max_page_launder=100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled=0;
static int vm_swap_idle_enabled=0;
#else
static int vm_swap_enabled=1;
static int vm_swap_idle_enabled=0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "Not implemented");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
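
/*
 * All of the RW knobs above are exported as sysctls under the "vm"
 * node, so they can be inspected and tuned at run time.  For example
 * (illustrative usage of sysctl(8); note that max_page_launder is
 * also recomputed at boot by vm_pageout() below):
 *
 *	sysctl -w vm.max_page_launder=16
 */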


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);
void pmap_collect(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.  Note the careful timing, however: the busy bit isn't set till
 * late and we cannot do anything that will mess with the page.
 */

static int
vm_pageout_clean(m)
	vm_page_t m;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
	 * with the new swapper, but we could have serious problems paging
	 * out other object types if there is insufficient memory.
	 *
	 * Unfortunately, checking free memory here is far too late, so the
	 * check has been moved up a procedural level.
	 */

#if 0
	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;
#endif

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

#if 0
	/*
	 * XXX REMOVED XXX.  vm_object_collapse() can block, which can
	 * change the page state.  Calling vm_object_collapse() might also
	 * destroy or rename the page because we have not busied it yet!!!
	 * So this code segment is removed.
	 */
	/*
	 * Try collapsing before it's too late.   XXX huh?  Why are we doing
	 * this here?
	 */
	if (object->backing_object) {
		vm_object_collapse(object);
	}
#endif

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
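	/*
	 * Layout of the mc[] cluster window built below (illustrative;
	 * the numbers assume the default vm_pageout_page_count of 16):
	 * the target page sits at mc[16].  The forward scan fills
	 * mc[17], mc[18], ... while the backward scan fills mc[15],
	 * mc[14], ..., decrementing page_base each time.  The result is
	 * a contiguous run mc[page_base .. page_base + pageout_count - 1]
	 * in ascending pindex order, which is exactly the array slice
	 * handed to vm_pageout_flush() at the bottom of this routine.
	 */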
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    (p->queue == PQ_INACTIVE) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	return vm_pageout_flush(&mc[page_base], pageout_count, 0);
}
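
/*
 * Note that vm_pageout_flush() below counts both VM_PAGER_OK and
 * VM_PAGER_PEND as successfully laundered, so the value returned to
 * vm_pageout_scan() (and subtracted from maxlaunder there) includes
 * writes that are still in flight, not just completed ones.
 */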

/*
 * vm_pageout_flush() - launder the given pages
 *
 *	The given pages are laundered.  Note that we setup for the start of
 *	I/O ( i.e. busy the page ), mark it read-only, and bump the object
 *	reference count all in here rather then in the parent.  If we want
 *	the parent to do more sophisticated things we may have to change
 *	the ordering.
 */

int
vm_pageout_flush(mc, count, flags)
	vm_page_t *mc;
	int count;
	int flags;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	/*
	 * Initiate I/O.  Bump the vm_page_t->busy counter and
	 * mark the pages read-only.
	 *
	 * We do not have to fixup the clean/dirty bits here... we can
	 * allow the pager to do it after the I/O completes.
	 */

	for (i = 0; i < count; i++) {
		vm_page_io_start(mc[i]);
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	object = mc[0]->object;
	vm_object_pip_add(object, count);

	vm_pager_put_pages(object, mc, count,
	    (flags | ((object == kernel_object) ? OBJPC_SYNC : 0)),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object. Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses. Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			vm_page_io_finish(mt);
		}
	}
	return numpagedout;
}

#if !defined(NO_SWAPPING)
/*
 *	vm_pageout_object_deactivate_pages
 *
 *	deactivate enough pages to satisfy the inactive target
 *	requirements or if vm_page_proc_limit is set, then
 *	deactivate all of the pages in the object and its
 *	backing_objects.
 *
 *	The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (pmap_resident_count(vm_map_pmap(map)) <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				vm_page_flag_set(p, PG_REFERENCED);
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				vm_page_flag_clear(p, PG_REFERENCED);
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					vm_page_flag_clear(p, PG_REFERENCED);
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}
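
/*
 * A sketch of the act_count aging used above (illustrative only;
 * ACT_DECLINE, ACT_ADVANCE and ACT_MAX come from the vm_page
 * definitions):  an active page found referenced gains ACT_ADVANCE,
 * capped near ACT_MAX; each pass that finds it unreferenced costs it
 * ACT_DECLINE.  Only when the count decays to zero (or the strict LRU
 * algorithm is enabled) is the page unmapped and deactivated, so a
 * page must go unreferenced for several passes before it is pushed
 * toward the inactive queue.
 */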

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out, this will free page
	 * table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
			VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

/*
 * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore
 * to vnode deadlocks.  We only do it for OBJT_DEFAULT and OBJT_SWAP objects
 * which we know can be trivially freed.
 */

void
vm_pageout_page_free(vm_page_t m) {
	vm_object_t object = m->object;
	int type = object->type;

	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_reference(object);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
		vm_object_deallocate(object);
}

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, maxscan, pcount;
	int addl_page_shortage, addl_page_shortage_init;
	int maxlaunder;
	int launder_loop = 0;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	addl_page_shortage_init = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;

	/*
	 * Calculate the number of pages we want to either free or move
	 * to the cache.
	 */

	page_shortage = (cnt.v_free_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage_init;

	/*
	 * Figure out what to do with dirty pages when they are encountered.
	 * Assume that 1/3 of the pages on the inactive list are clean.  If
	 * we think we can reach our target, disable laundering (do not
	 * clean any dirty pages).  If we miss the target we will loop back
	 * up and do a laundering run.
	 */

	if (cnt.v_inactive_count / 3 > page_shortage) {
		maxlaunder = 0;
		launder_loop = 0;
	} else {
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		launder_loop = 1;
	}
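
	/*
	 * Worked example with illustrative numbers:  if v_free_target is
	 * 1024, v_cache_min is 512, and there are currently 400 free and
	 * 600 cached pages, page_shortage starts at (1024 + 512) -
	 * (400 + 600) = 536.  With 3000 inactive pages, 3000 / 3 = 1000
	 * presumed-clean pages exceeds the shortage, so the first pass
	 * runs with maxlaunder = 0, and only a missed target triggers the
	 * second, laundering pass.
	 */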

	/*
	 * Start scanning the inactive queue for pages we can move to the
	 * cache or free.  The scan will stop when the target is reached or
	 * we have scanned the entire inactive queue.
	 */

rescan0:
	addl_page_shortage = addl_page_shortage_init;
	maxscan = cnt.v_inactive_count;
	for (
	    m = TAILQ_FIRST(&vm_page_queue_inactive);
	    m != NULL && maxscan-- > 0 && page_shortage > 0;
	    m = next
	) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous
		 * references.
		 */
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the
		 * inactive queue, we bump the "activation count" upwards,
		 * making it less likely that the page will be added back to
		 * the inactive queue prematurely again.  Here we check the
		 * page tables (or emulated bits, if any), given the upper
		 * level VM system not knowing anything about existing
		 * references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page
		 * references, we activate the page.  We also set the
		 * "activation count" higher than normal so that we will less
		 * likely place pages back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about
		 * the page being dirty, we have to check for it again.  As
		 * far as the VM code knows, any partially dirty pages are
		 * fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else {
			vm_page_dirty(m);
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			--page_shortage;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			--page_shortage;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);

			}

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * For now we protect against potential memory
			 * deadlocks by requiring significant memory to be
			 * free if the object is not OBJT_DEFAULT or OBJT_SWAP.
			 * We do not 'trust' any other object type to operate
			 * with low memory, not even OBJT_DEVICE.  The VM
			 * allocator will special case allocations done by
			 * the pageout daemon so the check below actually
			 * does have some hysteresis in it.  It isn't the best
			 * solution, though.
			 */

			if (
			    object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP &&
			    cnt.v_free_count < cnt.v_free_reserved
			) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			/*
			 * Presumably we have sufficient free memory to do
			 * the more sophisticated checks and locking required
			 * for vnodes.
			 *
			 * The object is already known NOT to be dead.  The
			 * vget() may still block, though, because
			 * VOP_ISLOCKED() doesn't check to see if an inode
			 * (v_data) is associated with the vnode.  If it isn't,
			 * vget() will load it in from disk.  Worse, vget()
			 * may actually get stuck waiting on "inode" if another
			 * process is in the process of bringing the inode in.
			 * This is bad news for us either way.
			 *
			 * So for the moment we check v_data == NULL as a
			 * workaround.  This means that vnodes which do not
			 * use v_data in the way we expect probably will not
			 * wind up being paged out by the pager and it will be
			 * up to the syncer to get them.  That's better than
			 * us blocking here.
			 *
			 * This whole code section is bogus - we need to fix
			 * the vnode pager to handle vm_page_t's without us
			 * having to do any sophisticated VOP tests.
			 */

			if (object->type == OBJT_VNODE) {
				vp = object->handle;

				if (VOP_ISLOCKED(vp) ||
				    vp->v_data == NULL ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another
				 * queue during potential blocking in vget()
				 * above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget() above.  We don't move
				 * the page back onto the end of the queue;
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}
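
	/*
	 * To summarize the laundering policy applied in the loop above:
	 * vnode-backed pages may always be cleaned, while swap-backed
	 * pages honor the vm.defer_swapspace_pageouts and
	 * vm.disable_swapspace_pageouts knobs -- deferred swap pageouts
	 * are still permitted once the free+cache total drops below
	 * v_free_min, while disabled ones never are.
	 */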

	/*
	 * If we still have a page shortage and we didn't launder anything,
	 * run the inactive scan again and launder something this time.
	 */

	if (launder_loop == 0 && page_shortage > 0) {
		launder_loop = 1;
		maxlaunder =
		    (cnt.v_inactive_target > max_page_launder) ?
		    max_page_launder : cnt.v_inactive_target;
		goto rescan0;
	}

	/*
	 * Compute the page shortage from the point of view of having to
	 * move pages from the active queue to the inactive queue.
	 */

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	page_shortage += addl_page_shortage;

	/*
	 * Scan the active queue for things we can deactivate
	 */

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);

	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		vm_page_flag_clear(m, PG_REFERENCED);

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}
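
	/*
	 * The splvm()-protected TAILQ_REMOVE/TAILQ_INSERT_TAIL pairs used
	 * throughout both queue scans implement a simple "requeue" of
	 * pages we cannot process right now:  the page is moved to the
	 * tail so the scan does not immediately revisit it, and the spl
	 * protection keeps interrupt-time page queue manipulation from
	 * seeing a half-updated queue.
	 */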

	s = splvm();

	/*
	 * We try to maintain some *really* free pages, this allows interrupt
	 * code to be guaranteed space.  Since both cache and free queues
	 * are considered basically 'free', moving pages from cache to free
	 * does not affect other calculations.
	 */

	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover, FALSE);
		if (!m)
			break;
		if ((m->flags & PG_BUSY) || m->busy || m->hold_count || m->wire_count) {
#ifdef INVARIANTS
			printf("Warning: busy page %p found in cache\n", m);
#endif
			vm_page_deactivate(m);
			continue;
		}
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);
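
	/*
	 * A note on the cache_rover stride above:  PQ_CACHE is really a
	 * set of queues, one per page color, and advancing the rover by
	 * PQ_PRIME2 (masked by PQ_L2_MASK) walks those colors in a
	 * scattered order so that replenishing the free list does not
	 * drain a single color.  Since PQ_PRIME2 is odd and the number of
	 * colors is a power of two, the two are relatively prime and
	 * every queue is eventually visited.
	 */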

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time_second != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time_second;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}

	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = vmspace_resident_count(p->p_vmspace);
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo LRU active queue,
 * so that during long periods of time where there is no paging,
 * some statistics accumulation still occurs.  This code
 * helps the situation where paging just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;
	int page_shortage;

	page_shortage = (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0)
		return;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			vm_page_flag_clear(m, PG_REFERENCED);
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}
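
/*
 * Worked example of the partial-scan clamp above (illustrative
 * numbers):  with vm_pageout_stats_max = 1024, 8192 pages total and
 * 2048 of them active, tpcount = (1024 * 2048) / 8192 = 256, so a
 * partial interval inspects at most 256 active pages; only when
 * fullintervalcount reaches vm_pageout_full_stats_interval does the
 * scan cover the whole active queue.
 */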

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
		cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
		cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}
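
/*
 * Worked example of the calculation above, assuming a machine with
 * 8192 pages (32MB at 4K pages), MAXBSIZE of 64K and PQ_L2_SIZE of
 * 256 (all illustrative and configuration dependent):  v_free_min
 * starts at 4 + (8192 - 1024) / 200 = 39; v_pageout_free_min is
 * (2 * 65536) / 4096 + 2 = 34; v_free_reserved comes to
 * 16 + 34 + 8192 / 768 + 256 = 316; and the final v_free_min becomes
 * 39 + 316 = 355 pages.
 */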


/*
 *	vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 5;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 5;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
				PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = OFF_TO_IDX(
			    qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
				p->p_rlimit[RLIMIT_RSS].rlim_max));

			/*
			 * let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = vmspace_resident_count(p->p_vmspace);
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(
				    &p->p_vmspace->vm_map, limit);
			}
		}
	}
}
#endif
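
/*
 * A note on the RSS limit enforcement in vm_daemon() above
 * (illustrative numbers):  OFF_TO_IDX() converts the byte-valued
 * rlimit into pages, so an RLIMIT_RSS of 16MB yields a limit of 4096
 * pages at a 4K page size.  A process resident beyond its limit gets
 * vm_pageout_map_deactivate_pages() applied to its map, and a
 * swapped-out process (P_INMEM clear) gets a limit of 0, which
 * deactivates everything and lets it be fully paged out.
 */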