vm_pageout.c revision 33936
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_pageout.c,v 1.116 1998/02/24 10:16:23 dyson Exp $
 */

/*
 * The proverbial page-out daemon.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

/*
 * System initialization
 */

/* the kernel process "vm_pageout" */
static void vm_pageout __P((void));
static int vm_pageout_clean __P((vm_page_t, int));
static int vm_pageout_scan __P((void));
static int vm_pageout_free_page_calc __P((vm_size_t count));
struct proc *pageproc;

static struct kproc_desc page_kp = {
	"pagedaemon",
	vm_pageout,
	&pageproc
};
SYSINIT_KT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start, &page_kp)

#if !defined(NO_SWAPPING)
/* the kernel process "vm_daemon" */
static void vm_daemon __P((void));
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT_KT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
#endif


int vm_pages_needed = 0;	/* Event on which pageout daemon sleeps */
int vm_pageout_deficit = 0;	/* Estimated number of pages deficit */
int vm_pageout_pages_needed = 0; /* flag saying that the pageout daemon needs pages */

extern int npendingio;
#if !defined(NO_SWAPPING)
static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
#endif
extern int nswiodone;
extern int vm_swap_size;
extern int vfs_update_wakeup;
static int vm_pageout_stats_max = 0, vm_pageout_stats_interval = 0;
static int vm_pageout_full_stats_interval = 0;
static int vm_pageout_stats_free_max = 0, vm_pageout_algorithm_lru = 0;
static int defer_swap_pageouts = 0;
static int disable_swap_pageouts = 0;

static int max_page_launder = 100;
#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
static int vm_swap_idle_enabled = 0;
#else
static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;
#endif

SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
	CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
	CTLFLAG_RW, &vm_pageout_stats_max, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
	CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
	CTLFLAG_RW, &vm_pageout_stats_interval, 0, "");

SYSCTL_INT(_vm, OID_AUTO, pageout_stats_free_max,
	CTLFLAG_RW, &vm_pageout_stats_free_max, 0, "");

#if defined(NO_SWAPPING)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RD, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RD, &vm_swap_idle_enabled, 0, "");
#else
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
	CTLFLAG_RW, &vm_swap_enabled, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
	CTLFLAG_RW, &vm_swap_idle_enabled, 0, "");
#endif

SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
	CTLFLAG_RW, &defer_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
	CTLFLAG_RW, &disable_swap_pageouts, 0, "");

SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
	CTLFLAG_RW, &max_page_launder, 0, "");
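
/*
 * Added note (not in the original revision): the knobs above are plain
 * read-write sysctl integers, so they can be inspected and tuned from
 * userland at run time, e.g. "sysctl vm.max_page_launder" to read and
 * "sysctl -w vm.max_page_launder=64" to raise the per-pass laundering
 * limit; exact sysctl(8) syntax may vary by release.
 */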


#define VM_PAGEOUT_PAGE_COUNT 16
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

int vm_page_max_wired;		/* XXX max # of wired pages system-wide */

#if !defined(NO_SWAPPING)
typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));
static freeer_fcn_t vm_pageout_object_deactivate_pages;
static void vm_req_vmdaemon __P((void));
#endif
static void vm_pageout_page_stats(void);
void pmap_collect(void);

/*
 * vm_pageout_clean:
 *
 * Clean the page and remove it from the laundry.
 *
 * We set the busy bit to cause potential page faults on this page to
 * block.
 *
 * And we set pageout-in-progress to keep the object from disappearing
 * during pageout.  This guarantees that the page won't move from the
 * inactive queue.  (However, any other page on the inactive queue may
 * move!)
 */
static int
vm_pageout_clean(m, sync)
	vm_page_t m;
	int sync;
{
	register vm_object_t object;
	vm_page_t mc[2*vm_pageout_page_count];
	int pageout_count;
	int i, forward_okay, backward_okay, page_base;
	vm_pindex_t pindex = m->pindex;

	object = m->object;

	/*
	 * If not OBJT_SWAP, additional memory may be needed to do the pageout.
	 * Try to avoid the deadlock.
	 */
	if ((sync != VM_PAGEOUT_FORCE) &&
	    (object->type == OBJT_DEFAULT) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min))
		return 0;

	/*
	 * Don't mess with the page if it's busy.
	 */
	if ((!sync && m->hold_count != 0) ||
	    ((m->busy != 0) || (m->flags & PG_BUSY)))
		return 0;

	/*
	 * Try collapsing before it's too late.
	 */
	if (!sync && object->backing_object) {
		vm_object_collapse(object);
	}

	mc[vm_pageout_page_count] = m;
	pageout_count = 1;
	page_base = vm_pageout_page_count;
	forward_okay = TRUE;
	if (pindex != 0)
		backward_okay = TRUE;
	else
		backward_okay = FALSE;
	/*
	 * Scan object for clusterable pages.
	 *
	 * We can cluster ONLY if: ->> the page is NOT
	 * clean, wired, busy, held, or mapped into a
	 * buffer, and one of the following:
	 * 1) The page is inactive, or a seldom used
	 *    active page.
	 * -or-
	 * 2) we force the issue.
	 */
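	/*
	 * Layout sketch (added commentary, assuming the default
	 * vm_pageout_page_count of 16): mc[] is a window of up to
	 * 2 * vm_pageout_page_count slots with the target page pinned at
	 * mc[16].  Forward cluster pages fill mc[17], mc[18], ... while
	 * backward pages fill mc[15], mc[14], ..., decrementing page_base,
	 * so the run handed to vm_pageout_flush() is always the contiguous
	 * span mc[page_base .. page_base + pageout_count - 1].
	 */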
	for (i = 1; (i < vm_pageout_page_count) && (forward_okay || backward_okay); i++) {
		vm_page_t p;

		/*
		 * See if forward page is clusterable.
		 */
		if (forward_okay) {
			/*
			 * Stop forward scan at end of object.
			 */
			if ((pindex + i) > object->size) {
				forward_okay = FALSE;
				goto do_backward;
			}
			p = vm_page_lookup(object, pindex + i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					forward_okay = FALSE;
					goto do_backward;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count + i] = p;
					pageout_count++;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					forward_okay = FALSE;
				}
			} else {
				forward_okay = FALSE;
			}
		}
do_backward:
		/*
		 * See if backward page is clusterable.
		 */
		if (backward_okay) {
			/*
			 * Stop backward scan at beginning of object.
			 */
			if ((pindex - i) == 0) {
				backward_okay = FALSE;
			}
			p = vm_page_lookup(object, pindex - i);
			if (p) {
				if (((p->queue - p->pc) == PQ_CACHE) ||
				    (p->flags & PG_BUSY) || p->busy) {
					backward_okay = FALSE;
					continue;
				}
				vm_page_test_dirty(p);
				if ((p->dirty & p->valid) != 0 &&
				    ((p->queue == PQ_INACTIVE) ||
				     (sync == VM_PAGEOUT_FORCE)) &&
				    (p->wire_count == 0) &&
				    (p->hold_count == 0)) {
					mc[vm_pageout_page_count - i] = p;
					pageout_count++;
					page_base--;
					if (pageout_count == vm_pageout_page_count)
						break;
				} else {
					backward_okay = FALSE;
				}
			} else {
				backward_okay = FALSE;
			}
		}
	}

	/*
	 * we allow reads during pageouts...
	 */
	for (i = page_base; i < (page_base + pageout_count); i++) {
		mc[i]->busy++;
		vm_page_protect(mc[i], VM_PROT_READ);
	}

	return vm_pageout_flush(&mc[page_base], pageout_count, sync);
}

int
vm_pageout_flush(mc, count, sync)
	vm_page_t *mc;
	int count;
	int sync;
{
	register vm_object_t object;
	int pageout_status[count];
	int numpagedout = 0;
	int i;

	object = mc[0]->object;
	object->paging_in_progress += count;

	vm_pager_put_pages(object, mc, count,
	    ((sync || (object == kernel_object)) ? TRUE : FALSE),
	    pageout_status);

	for (i = 0; i < count; i++) {
		vm_page_t mt = mc[i];

		switch (pageout_status[i]) {
		case VM_PAGER_OK:
			numpagedout++;
			break;
		case VM_PAGER_PEND:
			numpagedout++;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 */
			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
			mt->dirty = 0;
			break;
		case VM_PAGER_ERROR:
		case VM_PAGER_FAIL:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.  (We
			 * will try paging it out again later.)
			 */
			vm_page_activate(mt);
			break;
		case VM_PAGER_AGAIN:
			break;
		}

		/*
		 * If the operation is still going, leave the page busy to
		 * block all other accesses.  Also, leave the paging in
		 * progress indicator set so that we don't attempt an object
		 * collapse.
		 */
		if (pageout_status[i] != VM_PAGER_PEND) {
			vm_object_pip_wakeup(object);
			mt->flags |= PG_BUSY;
			mt->busy--;
			PAGE_WAKEUP(mt);
		}
	}
	return numpagedout;
}
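
/*
 * Added note: vm_pageout_flush() reports the number of pages the pager
 * accepted (VM_PAGER_OK plus VM_PAGER_PEND).  vm_pageout_clean() returns
 * that count to vm_pageout_scan(), which subtracts it from maxlaunder to
 * bound the amount of laundering done in a single daemon pass.
 */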

#if !defined(NO_SWAPPING)
/*
 * vm_pageout_object_deactivate_pages
 *
 * deactivate enough pages to satisfy the inactive target
 * requirements or if vm_page_proc_limit is set, then
 * deactivate all of the pages in the object and its
 * backing_objects.
 *
 * The object and map must be locked.
 */
static void
vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
	vm_map_t map;
	vm_object_t object;
	vm_pindex_t desired;
	int map_remove_only;
{
	register vm_page_t p, next;
	int rcount;
	int remove_mode;
	int s;

	if (object->type == OBJT_DEVICE)
		return;

	while (object) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			return;
		if (object->paging_in_progress)
			return;

		remove_mode = map_remove_only;
		if (object->shadow_count > 1)
			remove_mode = 1;
		/*
		 * scan the object's entire memory queue
		 */
		rcount = object->resident_page_count;
		p = TAILQ_FIRST(&object->memq);
		while (p && (rcount-- > 0)) {
			int actcount;
			if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
				return;
			next = TAILQ_NEXT(p, listq);
			cnt.v_pdpages++;
			if (p->wire_count != 0 ||
			    p->hold_count != 0 ||
			    p->busy != 0 ||
			    (p->flags & PG_BUSY) ||
			    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
				p = next;
				continue;
			}

			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
			if (actcount) {
				p->flags |= PG_REFERENCED;
			} else if (p->flags & PG_REFERENCED) {
				actcount = 1;
			}

			if ((p->queue != PQ_ACTIVE) &&
			    (p->flags & PG_REFERENCED)) {
				vm_page_activate(p);
				p->act_count += actcount;
				p->flags &= ~PG_REFERENCED;
			} else if (p->queue == PQ_ACTIVE) {
				if ((p->flags & PG_REFERENCED) == 0) {
					p->act_count -= min(p->act_count, ACT_DECLINE);
					if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_deactivate(p);
					} else {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
						splx(s);
					}
				} else {
					vm_page_activate(p);
					p->flags &= ~PG_REFERENCED;
					if (p->act_count < (ACT_MAX - ACT_ADVANCE))
						p->act_count += ACT_ADVANCE;
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
					splx(s);
				}
			} else if (p->queue == PQ_INACTIVE) {
				vm_page_protect(p, VM_PROT_NONE);
			}
			p = next;
		}
		object = object->backing_object;
	}
	return;
}
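
/*
 * Worked example (added, with assumed numbers): with 4 KB pages, a process
 * whose effective RSS limit is 16 MB is passed desired = 4096 by
 * vm_daemon() below; the walk down the object chain above stops as soon
 * as the pmap's resident_count drops to 4096 pages or fewer.
 */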

/*
 * deactivate some number of pages in a map, try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_pageout_map_deactivate_pages(map, desired)
	vm_map_t map;
	vm_pindex_t desired;
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;

	if (lockmgr(&map->lock, LK_EXCLUSIVE | LK_NOWAIT, (void *)0, curproc)) {
		return;
	}

	bigobj = NULL;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if ((obj != NULL) && (obj->shadow_count <= 1) &&
			    ((bigobj == NULL) ||
			     (bigobj->resident_page_count < obj->resident_page_count))) {
				bigobj = obj;
			}
		}
		tmpe = tmpe->next;
	}

	if (bigobj)
		vm_pageout_object_deactivate_pages(map, bigobj, desired, 0);

	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	tmpe = map->header.next;
	while (tmpe != &map->header) {
		if (vm_map_pmap(map)->pm_stats.resident_count <= desired)
			break;
		if ((tmpe->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
			obj = tmpe->object.vm_object;
			if (obj)
				vm_pageout_object_deactivate_pages(map, obj, desired, 0);
		}
		tmpe = tmpe->next;
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0)
		pmap_remove(vm_map_pmap(map),
		    VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_unlock(map);
	return;
}
#endif

void
vm_pageout_page_free(vm_page_t m)
{
	struct vnode *vp;
	vm_object_t object;

	object = m->object;
	object->ref_count++;

	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vp->v_usecount++;
		if (VSHOULDBUSY(vp))
			vbusy(vp);
	}

	m->flags |= PG_BUSY;
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	vm_object_deallocate(object);
}

/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 */
static int
vm_pageout_scan()
{
	vm_page_t m, next;
	int page_shortage, addl_page_shortage, maxscan, pcount;
	int maxlaunder;
	int pages_freed;
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	vm_object_t object;
	int force_wakeup = 0;
	int actcount;
	int vnodes_skipped = 0;
	int s;

	/*
	 * Do whatever cleanup that the pmap code can.
	 */
	pmap_collect();

	/*
	 * Start scanning the inactive queue for pages we can free.  We keep
	 * scanning until we have enough free pages or we have scanned through
	 * the entire queue.  If we encounter dirty pages, we start cleaning
	 * them.
	 */

	pages_freed = 0;
	addl_page_shortage = vm_pageout_deficit;
	vm_pageout_deficit = 0;

	if (max_page_launder == 0)
		max_page_launder = 1;
	maxlaunder = (cnt.v_inactive_target > max_page_launder) ?
	    max_page_launder : cnt.v_inactive_target;

rescan0:
	maxscan = cnt.v_inactive_count;
	for (m = TAILQ_FIRST(&vm_page_queue_inactive);
	     (m != NULL) && (maxscan-- > 0) &&
	     ((cnt.v_cache_count + cnt.v_free_count) <
	      (cnt.v_cache_min + cnt.v_free_target));
	     m = next) {

		cnt.v_pdpages++;

		if (m->queue != PQ_INACTIVE) {
			goto rescan0;
		}

		next = TAILQ_NEXT(m, pageq);

		if (m->hold_count) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
			splx(s);
			addl_page_shortage++;
			continue;
		}
		/*
		 * Don't mess with busy pages; keep them at the front of the
		 * queue, since they are most likely being paged out.
		 */
		if (m->busy || (m->flags & PG_BUSY)) {
			addl_page_shortage++;
			continue;
		}

		/*
		 * If the object is not being used, we ignore previous references.
		 */
		if (m->object->ref_count == 0) {
			m->flags &= ~PG_REFERENCED;
			pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * Otherwise, if the page has been referenced while in the inactive
		 * queue, we bump the "activation count" upwards, making it less
		 * likely that the page will be added back to the inactive queue
		 * prematurely again.  Here we check the page tables (or emulated
		 * bits, if any), since the upper level VM system does not know
		 * anything about existing references.
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
		    (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
			continue;
		}

		/*
		 * If the upper level VM system knows about any page references,
		 * we activate the page.  We also set the "activation count" higher
		 * than normal so that it is less likely that the page will be
		 * placed back onto the inactive queue again.
		 */
		if ((m->flags & PG_REFERENCED) != 0) {
			m->flags &= ~PG_REFERENCED;
			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
			continue;
		}

		/*
		 * If the upper level VM system doesn't know anything about the
		 * page being dirty, we have to check for it again.  As far as the
		 * VM code knows, any partially dirty pages are fully dirty.
		 */
		if (m->dirty == 0) {
			vm_page_test_dirty(m);
		} else if (m->dirty != 0) {
			m->dirty = VM_PAGE_BITS_ALL;
		}

		/*
		 * Invalid pages can be easily freed
		 */
		if (m->valid == 0) {
			vm_pageout_page_free(m);
			cnt.v_dfree++;
			pages_freed++;

		/*
		 * Clean pages can be placed onto the cache queue.
		 */
		} else if (m->dirty == 0) {
			vm_page_cache(m);
			pages_freed++;

		/*
		 * Dirty pages need to be paged out.  Note that we clean
		 * only a limited number of pages per pagedaemon pass.
		 */
		} else if (maxlaunder > 0) {
			int written;
			int swap_pageouts_ok;
			struct vnode *vp = NULL;

			object = m->object;

			if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
				swap_pageouts_ok = 1;
			} else {
				swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
				swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
				    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min);
			}
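
			/*
			 * Added commentary on the net effect for swap-backed
			 * objects: disable_swap_pageouts forbids the pageout
			 * outright; defer_swap_pageouts alone allows it only
			 * once the shortage is severe (free + cache below
			 * v_free_min); with neither set it is always allowed.
			 */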

			/*
			 * We don't bother paging objects that are "dead".
			 * Those objects are in a "rundown" state.
			 */
			if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
				splx(s);
				continue;
			}

			if ((object->type == OBJT_VNODE) &&
			    (object->flags & OBJ_DEAD) == 0) {
				vp = object->handle;
				if (VOP_ISLOCKED(vp) ||
				    vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
					if ((m->queue == PQ_INACTIVE) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0) &&
					    (m->flags & PG_BUSY) == 0) {
						s = splvm();
						TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
						TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
						splx(s);
					}
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					continue;
				}

				/*
				 * The page might have been moved to another queue
				 * during potential blocking in vget() above.
				 */
				if (m->queue != PQ_INACTIVE) {
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}

				/*
				 * The page may have been busied during the
				 * blocking in vget(); we don't move the page
				 * back onto the end of the queue, since
				 * statistics are more correct if we don't.
				 */
				if (m->busy || (m->flags & PG_BUSY)) {
					vput(vp);
					continue;
				}

				/*
				 * If the page has become held, then skip it
				 */
				if (m->hold_count) {
					s = splvm();
					TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
					TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
					splx(s);
					if (object->flags & OBJ_MIGHTBEDIRTY)
						vnodes_skipped++;
					vput(vp);
					continue;
				}
			}

			/*
			 * If a page is dirty, then it is either being washed
			 * (but not yet cleaned) or it is still in the
			 * laundry.  If it is still in the laundry, then we
			 * start the cleaning operation.
			 */
			written = vm_pageout_clean(m, 0);
			if (vp)
				vput(vp);

			maxlaunder -= written;
		}
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory be
	 * sure that we will move a minimal amount of pages from active to
	 * inactive.
	 */
	page_shortage = (cnt.v_inactive_target + cnt.v_cache_min) -
	    (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
	if (page_shortage <= 0) {
		if (pages_freed == 0) {
			page_shortage = cnt.v_free_min - cnt.v_free_count;
		} else {
			page_shortage = 1;
		}
	}

	/*
	 * If the "inactive" loop finds that there is a shortage over and
	 * above the page statistics variables, then we need to accommodate
	 * that.  This avoids potential deadlocks due to pages being temporarily
	 * busy for I/O or other types of temporary wiring.
	 */
	if (addl_page_shortage) {
		if (page_shortage < 0)
			page_shortage = 0;
		page_shortage += addl_page_shortage;
	}
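
	/*
	 * Worked example (added, illustrative numbers only): with
	 * v_inactive_target = 300, v_cache_min = 100, v_free_count = 50,
	 * v_inactive_count = 200 and v_cache_count = 80, the shortage is
	 * (300 + 100) - (50 + 200 + 80) = 70 pages to be moved from the
	 * active queue this pass.
	 */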

	pcount = cnt.v_active_count;
	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {

		/*
		 * This is a consistency check, and should likely be a panic
		 * or warning.
		 */
		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		/*
		 * The count for pagedaemon pages is done after checking the
		 * page for eligibility...
		 */
		cnt.v_pdpages++;

		/*
		 * Check to see "how much" the page has been used.
		 */
		actcount = 0;
		if (m->object->ref_count != 0) {
			if (m->flags & PG_REFERENCED) {
				actcount += 1;
			}
			actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
			if (actcount) {
				m->act_count += ACT_ADVANCE + actcount;
				if (m->act_count > ACT_MAX)
					m->act_count = ACT_MAX;
			}
		}

		/*
		 * Since we have "tested" this bit, we need to clear it now.
		 */
		m->flags &= ~PG_REFERENCED;

		/*
		 * Only if an object is currently being used, do we use the
		 * page activation count stats.
		 */
		if (actcount && (m->object->ref_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			m->act_count -= min(m->act_count, ACT_DECLINE);
			if (vm_pageout_algorithm_lru ||
			    (m->object->ref_count == 0) || (m->act_count == 0)) {
				page_shortage--;
				if (m->object->ref_count == 0) {
					vm_page_protect(m, VM_PROT_NONE);
					if (m->dirty == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else {
					vm_page_deactivate(m);
				}
			} else {
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}
		m = next;
	}

	s = splvm();
	/*
	 * We try to maintain some *really* free pages; this allows interrupt
	 * code to be guaranteed space.
	 */
	while (cnt.v_free_count < cnt.v_free_reserved) {
		static int cache_rover = 0;
		m = vm_page_list_find(PQ_CACHE, cache_rover);
		if (!m)
			break;
		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
		vm_pageout_page_free(m);
		cnt.v_dfree++;
	}
	splx(s);
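
	/*
	 * Added note: cache_rover advances by PQ_PRIME2 modulo PQ_L2_MASK so
	 * that successive frees are spread across the page-coloring queues
	 * of PQ_CACHE instead of repeatedly draining a single color.
	 */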

#if !defined(NO_SWAPPING)
	/*
	 * Idle process swapout -- run once per second.
	 */
	if (vm_swap_idle_enabled) {
		static long lsec;
		if (time.tv_sec != lsec) {
			vm_pageout_req_swapout |= VM_SWAP_IDLE;
			vm_req_vmdaemon();
			lsec = time.tv_sec;
		}
	}
#endif

	/*
	 * If we didn't get enough free pages, and we have skipped a vnode
	 * in a writeable object, wakeup the sync daemon.  And kick swapout
	 * if we did not get enough free pages.
	 */
	if ((cnt.v_cache_count + cnt.v_free_count) <
	    (cnt.v_free_target + cnt.v_cache_min)) {
		if (vnodes_skipped &&
		    (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
			if (!vfs_update_wakeup) {
				vfs_update_wakeup = 1;
				wakeup(&vfs_update_wakeup);
			}
		}
#if !defined(NO_SWAPPING)
		if (vm_swap_enabled &&
		    (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target)) {
			vm_req_vmdaemon();
			vm_pageout_req_swapout |= VM_SWAP_NORMAL;
		}
#endif
	}


	/*
	 * make sure that we have swap space -- if we are low on memory and
	 * swap -- then kill the biggest process.
	 */
	if ((vm_swap_size == 0 || swap_pager_full) &&
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
		bigproc = NULL;
		bigsize = 0;
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			/*
			 * if this is a system process, skip it
			 */
			if ((p->p_flag & P_SYSTEM) || (p->p_pid == 1) ||
			    ((p->p_pid < 48) && (vm_swap_size != 0))) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get the process size
			 */
			size = p->p_vmspace->vm_pmap.pm_stats.resident_count;
			/*
			 * if this process is bigger than the biggest one,
			 * remember it.
			 */
			if (size > bigsize) {
				bigproc = p;
				bigsize = size;
			}
		}
		if (bigproc != NULL) {
			killproc(bigproc, "out of swap space");
			bigproc->p_estcpu = 0;
			bigproc->p_nice = PRIO_MIN;
			resetpriority(bigproc);
			wakeup(&cnt.v_free_count);
		}
	}
	return force_wakeup;
}

/*
 * This routine tries to maintain the pseudo-LRU active queue, so that
 * during long periods of time where there is no paging, some statistic
 * accumulation still occurs.  This code helps the situation where paging
 * just starts to occur.
 */
static void
vm_pageout_page_stats()
{
	int s;
	vm_page_t m, next;
	int pcount, tpcount;		/* Number of pages to check */
	static int fullintervalcount = 0;

	pcount = cnt.v_active_count;
	fullintervalcount += vm_pageout_stats_interval;
	if (fullintervalcount < vm_pageout_full_stats_interval) {
		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
		if (pcount > tpcount)
			pcount = tpcount;
	}
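
	/*
	 * Worked example (added, illustrative numbers): with
	 * vm_pageout_stats_max = 1000, v_active_count = 8000 and
	 * v_page_count = 16000, a partial pass checks at most
	 * (1000 * 8000) / 16000 = 500 active pages; once fullintervalcount
	 * reaches vm_pageout_full_stats_interval, the whole queue is checked.
	 */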

	m = TAILQ_FIRST(&vm_page_queue_active);
	while ((m != NULL) && (pcount-- > 0)) {
		int actcount;

		if (m->queue != PQ_ACTIVE) {
			break;
		}

		next = TAILQ_NEXT(m, pageq);
		/*
		 * Don't deactivate pages that are busy.
		 */
		if ((m->busy != 0) ||
		    (m->flags & PG_BUSY) ||
		    (m->hold_count != 0)) {
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
			m = next;
			continue;
		}

		actcount = 0;
		if (m->flags & PG_REFERENCED) {
			m->flags &= ~PG_REFERENCED;
			actcount += 1;
		}

		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
			if (m->act_count > ACT_MAX)
				m->act_count = ACT_MAX;
			s = splvm();
			TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
			TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
			splx(s);
		} else {
			if (m->act_count == 0) {
				/*
				 * We turn off page access, so that we have
				 * more accurate RSS stats.  We don't do this
				 * in the normal page deactivation when the
				 * system is loaded VM wise, because the cost
				 * of the large number of page protect
				 * operations would be higher than the value
				 * of doing the operation.
				 */
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_deactivate(m);
			} else {
				m->act_count -= min(m->act_count, ACT_DECLINE);
				s = splvm();
				TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
				TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
				splx(s);
			}
		}

		m = next;
	}
}

static int
vm_pageout_free_page_calc(count)
vm_size_t count;
{
	if (count < cnt.v_page_count)
		return 0;
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_page_count > 1024)
		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
	else
		cnt.v_free_min = 4;
	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
	    cnt.v_interrupt_free_min;
	cnt.v_free_reserved = vm_pageout_page_count +
	    cnt.v_pageout_free_min + (count / 768) + PQ_L2_SIZE;
	cnt.v_free_min += cnt.v_free_reserved;
	return 1;
}
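
/*
 * Worked example (added, assumed configuration): on a machine with 8192
 * pages (32 MB at 4 KB per page), the formula above starts v_free_min at
 * 4 + (8192 - 1024) / 200 = 39 pages; v_free_reserved is then computed
 * and added on, so the final v_free_min is substantially larger.
 */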


/*
 * vm_pageout is the high level pageout daemon.
 */
static void
vm_pageout()
{
	/*
	 * Initialize some paging parameters.
	 */

	cnt.v_interrupt_free_min = 2;
	if (cnt.v_page_count < 2000)
		vm_pageout_page_count = 8;

	vm_pageout_free_page_calc(cnt.v_page_count);
	/*
	 * free_reserved needs to include enough for the largest swap pager
	 * structures plus enough for any pv_entry structs when paging.
	 */
	if (cnt.v_free_count > 6144)
		cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
	else
		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;

	if (cnt.v_free_count > 2048) {
		cnt.v_cache_min = cnt.v_free_target;
		cnt.v_cache_max = 2 * cnt.v_cache_min;
		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
	} else {
		cnt.v_cache_min = 0;
		cnt.v_cache_max = 0;
		cnt.v_inactive_target = cnt.v_free_count / 4;
	}
	if (cnt.v_inactive_target > cnt.v_free_count / 3)
		cnt.v_inactive_target = cnt.v_free_count / 3;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	if (vm_pageout_stats_max == 0)
		vm_pageout_stats_max = cnt.v_free_target;

	/*
	 * Set interval in seconds for stats scan.
	 */
	if (vm_pageout_stats_interval == 0)
		vm_pageout_stats_interval = 4;
	if (vm_pageout_full_stats_interval == 0)
		vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;


	/*
	 * Set maximum free per pass
	 */
	if (vm_pageout_stats_free_max == 0)
		vm_pageout_stats_free_max = 25;

	max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);

	swap_pager_swap_init();
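
	/*
	 * Added note on the wakeup protocol: pagedaemon_wakeup() below sets
	 * vm_pages_needed and issues a wakeup on its address to rouse this
	 * loop; after each scan the daemon broadcasts on &cnt.v_free_count,
	 * the channel that blocked allocators (e.g. vm_wait()) sleep on.
	 */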

	/*
	 * The pageout daemon is never done, so loop forever.
	 */
	while (TRUE) {
		int inactive_target;
		int error;
		int s = splvm();
		if (!vm_pages_needed ||
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_min)) {
			vm_pages_needed = 0;
			error = tsleep(&vm_pages_needed,
			    PVM, "psleep", vm_pageout_stats_interval * hz);
			if (error && !vm_pages_needed) {
				splx(s);
				vm_pageout_page_stats();
				continue;
			}
		} else if (vm_pages_needed) {
			vm_pages_needed = 0;
			tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
		}

		if (vm_pages_needed)
			cnt.v_pdwakeups++;
		vm_pages_needed = 0;
		splx(s);
		vm_pager_sync();
		vm_pageout_scan();
		vm_pageout_deficit = 0;
		vm_pager_sync();
		wakeup(&cnt.v_free_count);
	}
}

void
pagedaemon_wakeup()
{
	if (!vm_pages_needed && curproc != pageproc) {
		vm_pages_needed++;
		wakeup(&vm_pages_needed);
	}
}

#if !defined(NO_SWAPPING)
static void
vm_req_vmdaemon()
{
	static int lastrun = 0;

	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
}

static void
vm_daemon()
{
	vm_object_t object;
	struct proc *p;

	while (TRUE) {
		tsleep(&vm_daemon_needed, PUSER, "psleep", 0);
		if (vm_pageout_req_swapout) {
			swapout_procs(vm_pageout_req_swapout);
			vm_pageout_req_swapout = 0;
		}
		/*
		 * scan the processes for exceeding their rlimits or if
		 * process is swapped out -- deactivate pages
		 */

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			quad_t limit;
			vm_offset_t size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			if (p->p_flag & (P_SYSTEM | P_WEXIT)) {
				continue;
			}
			/*
			 * if the process is in a non-running type state,
			 * don't touch it.
			 */
			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
				continue;
			}
			/*
			 * get a limit
			 */
			limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
			    p->p_rlimit[RLIMIT_RSS].rlim_max);

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */

			size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
			if (limit >= 0 && size >= limit) {
				vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
				    (vm_pindex_t)(limit >> PAGE_SHIFT));
			}
		}
	}
}
#endif